Posted to commits@metron.apache.org by rm...@apache.org on 2018/04/27 19:29:41 UTC

[01/50] [abbrv] metron git commit: METRON-1490: Better error message when user specifies an enrichment type that doesn't exist closes apache/metron#963

Repository: metron
Updated Branches:
  refs/heads/feature/METRON-1416-upgrade-solr f8d7843e9 -> d0a4e4c0f


METRON-1490: Better error message when user specifies an enrichment type that doesn't exist closes apache/metron#963


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/37662d3b
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/37662d3b
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/37662d3b

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 37662d3bd8a4162bef4474be7ab60a2fabbe3e2f
Parents: 03a4aa3
Author: cstella <ce...@gmail.com>
Authored: Thu Mar 15 12:05:39 2018 -0400
Committer: cstella <ce...@gmail.com>
Committed: Thu Mar 15 12:05:39 2018 -0400

----------------------------------------------------------------------
 .../enrichment/parallel/ParallelEnricher.java   |  5 +++
 .../parallel/ParallelEnricherTest.java          | 39 +++++++++++++++++++-
 2 files changed, 43 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/37662d3b/metron-platform/metron-enrichment/src/main/java/org/apache/metron/enrichment/parallel/ParallelEnricher.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-enrichment/src/main/java/org/apache/metron/enrichment/parallel/ParallelEnricher.java b/metron-platform/metron-enrichment/src/main/java/org/apache/metron/enrichment/parallel/ParallelEnricher.java
index 2238c92..6ddb892 100644
--- a/metron-platform/metron-enrichment/src/main/java/org/apache/metron/enrichment/parallel/ParallelEnricher.java
+++ b/metron-platform/metron-enrichment/src/main/java/org/apache/metron/enrichment/parallel/ParallelEnricher.java
@@ -18,6 +18,7 @@
 package org.apache.metron.enrichment.parallel;
 
 import com.github.benmanes.caffeine.cache.stats.CacheStats;
+import com.google.common.base.Joiner;
 import org.apache.metron.common.Constants;
 import org.apache.metron.common.configuration.enrichment.SensorEnrichmentConfig;
 import org.apache.metron.common.configuration.enrichment.handler.ConfigHandler;
@@ -152,6 +153,10 @@ public class ParallelEnricher {
     for(Map.Entry<String, List<JSONObject>> task : tasks.entrySet()) {
       //task is the list of enrichment tasks for the task.getKey() adapter
       EnrichmentAdapter<CacheKey> adapter = enrichmentsByType.get(task.getKey());
+      if(adapter == null) {
+        throw new IllegalStateException("Unable to find an adapter for " + task.getKey()
+                + ", possible adapters are: " + Joiner.on(",").join(enrichmentsByType.keySet()));
+      }
       for(JSONObject m : task.getValue()) {
         /* now for each unit of work (each of these only has one element in them)
          * the key is the field name and the value is value associated with that field.

http://git-wip-us.apache.org/repos/asf/metron/blob/37662d3b/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/parallel/ParallelEnricherTest.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/parallel/ParallelEnricherTest.java b/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/parallel/ParallelEnricherTest.java
index c3a3109..4a4573b 100644
--- a/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/parallel/ParallelEnricherTest.java
+++ b/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/parallel/ParallelEnricherTest.java
@@ -17,6 +17,7 @@
  */
 package org.apache.metron.enrichment.parallel;
 
+import com.google.common.base.Joiner;
 import com.google.common.collect.ImmutableMap;
 import org.adrianwalker.multilinestring.Multiline;
 import org.apache.metron.common.Constants;
@@ -24,6 +25,7 @@ import org.apache.metron.common.configuration.enrichment.SensorEnrichmentConfig;
 import org.apache.metron.common.utils.JSONUtils;
 import org.apache.metron.enrichment.adapters.stellar.StellarAdapter;
 import org.apache.metron.enrichment.bolt.CacheKey;
+import org.apache.metron.enrichment.interfaces.EnrichmentAdapter;
 import org.apache.metron.stellar.dsl.Context;
 import org.apache.metron.stellar.dsl.StellarFunctions;
 import org.json.simple.JSONObject;
@@ -32,6 +34,7 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 
 import java.util.HashMap;
+import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
 
 public class ParallelEnricherTest {
@@ -61,6 +64,7 @@ public class ParallelEnricherTest {
   private static ParallelEnricher enricher;
   private static Context stellarContext;
   private static AtomicInteger numAccesses = new AtomicInteger(0);
+  private static Map<String, EnrichmentAdapter<CacheKey>> enrichmentsByType;
   @BeforeClass
   public static void setup() {
     ConcurrencyContext infrastructure = new ConcurrencyContext();
@@ -75,7 +79,8 @@ public class ParallelEnricherTest {
       }
     }.ofType("ENRICHMENT");
     adapter.initializeAdapter(new HashMap<>());
-    enricher = new ParallelEnricher(ImmutableMap.of("stellar", adapter), infrastructure, false);
+    enrichmentsByType = ImmutableMap.of("stellar", adapter);
+    enricher = new ParallelEnricher(enrichmentsByType, infrastructure, false);
   }
 
   @Test
@@ -154,4 +159,36 @@ public class ParallelEnricherTest {
     Assert.assertEquals("TEST", ret.get("ALL_CAPS"));
     Assert.assertEquals(1, result.getEnrichmentErrors().size());
   }
+
+  /**
+   * {
+  "enrichment": {
+    "fieldMap": {
+      "hbaseThreatIntel" : [ "ip_src_addr"]
+      }
+    ,"fieldToTypeMap": { }
+  },
+  "threatIntel": { }
+}
+   */
+  @Multiline
+  public static String badConfigWrongEnrichmentType;
+
+  @Test
+  public void testBadConfigWrongEnrichmentType() throws Exception {
+    SensorEnrichmentConfig config = JSONUtils.INSTANCE.load(badConfigWrongEnrichmentType, SensorEnrichmentConfig.class);
+    config.getConfiguration().putIfAbsent("stellarContext", stellarContext);
+    JSONObject message = new JSONObject() {{
+      put(Constants.SENSOR_TYPE, "test");
+    }};
+    try {
+      enricher.apply(message, EnrichmentStrategies.ENRICHMENT, config, null);
+      Assert.fail("This is an invalid config, we should have failed.");
+    }
+    catch(IllegalStateException ise) {
+      Assert.assertEquals(ise.getMessage()
+              , "Unable to find an adapter for hbaseThreatIntel, possible adapters are: " + Joiner.on(",").join(enrichmentsByType.keySet())
+      );
+    }
+  }
 }
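
The guard added to `ParallelEnricher` above follows a fail-fast pattern: validate the map lookup immediately and put the full set of valid keys into the error message, so a misconfigured enrichment type is diagnosable at a glance. Below is a minimal, self-contained sketch of that pattern; the class and adapter names are illustrative stand-ins (not Metron's types), and `String.join` stands in for Guava's `Joiner`:

```java
import java.util.Map;
import java.util.TreeMap;

public class AdapterLookupDemo {

  // Illustrative registry, standing in for enrichmentsByType.
  private static final Map<String, String> ADAPTERS = new TreeMap<>();
  static {
    ADAPTERS.put("stellar", "StellarAdapter");
    ADAPTERS.put("geo", "GeoAdapter");
  }

  static String lookup(String type) {
    String adapter = ADAPTERS.get(type);
    if (adapter == null) {
      // Fail fast and report what would have been valid.
      throw new IllegalStateException("Unable to find an adapter for " + type
          + ", possible adapters are: " + String.join(",", ADAPTERS.keySet()));
    }
    return adapter;
  }

  public static void main(String[] args) {
    System.out.println(lookup("stellar")); // prints StellarAdapter
    // Throws: Unable to find an adapter for hbaseThreatIntel, possible adapters are: geo,stellar
    System.out.println(lookup("hbaseThreatIntel"));
  }
}
```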


[34/50] [abbrv] metron git commit: Add support for Vagrant Cachier plugin if present (simonellistonball via mmiklavc) closes apache/metron#993

Posted by rm...@apache.org.
Add support for Vagrant Cachier plugin if present (simonellistonball via mmiklavc) closes apache/metron#993


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/53124d97
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/53124d97
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/53124d97

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 53124d97d09baafcf6d84eb291f6a3828289ec7c
Parents: eb5b2d4
Author: simonellistonball <si...@simonellistonball.com>
Authored: Fri Apr 13 09:30:54 2018 -0600
Committer: Michael Miklavcic <mi...@gmail.com>
Committed: Fri Apr 13 09:30:54 2018 -0600

----------------------------------------------------------------------
 metron-deployment/development/README.md            |  5 +++++
 metron-deployment/development/centos6/Vagrantfile  | 11 +++++++++++
 metron-deployment/development/ubuntu14/Vagrantfile | 11 +++++++++++
 3 files changed, 27 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/53124d97/metron-deployment/development/README.md
----------------------------------------------------------------------
diff --git a/metron-deployment/development/README.md b/metron-deployment/development/README.md
index bc99809..2a04e5f 100644
--- a/metron-deployment/development/README.md
+++ b/metron-deployment/development/README.md
@@ -22,3 +22,8 @@ This directory contains environments useful for Metron developers.  These enviro
 * Metron running on CentOS 6
 * Metron running on Ubuntu 14
 * Fastcapa
+
+
+## Vagrant Cachier recommendations
+
+The development boxes are designed to be spun up and destroyed on a regular basis as part of the development cycle. To avoid the overhead of re-downloading many of the heavy platform dependencies, Vagrant can use the [vagrant-cachier](http://fgrehm.viewdocs.io/vagrant-cachier/) plugin to store package caches between builds. If the plugin is installed in your Vagrant setup it will be used automatically, and packages will be cached in `~/.vagrant/cache`.

http://git-wip-us.apache.org/repos/asf/metron/blob/53124d97/metron-deployment/development/centos6/Vagrantfile
----------------------------------------------------------------------
diff --git a/metron-deployment/development/centos6/Vagrantfile b/metron-deployment/development/centos6/Vagrantfile
index 101a2dd..d0b7051 100644
--- a/metron-deployment/development/centos6/Vagrantfile
+++ b/metron-deployment/development/centos6/Vagrantfile
@@ -60,6 +60,17 @@ Vagrant.configure(2) do |config|
   config.hostmanager.enabled = true
   config.hostmanager.manage_host = true
 
+  # enable vagrant cachier if present
+  if Vagrant.has_plugin?("vagrant-cachier")
+    config.cache.enable :yum
+    config.cache.scope = :box
+
+    config.cache.synced_folder_opts = {
+      type: :nfs,
+      mount_options: ['rw', 'vers=3', 'tcp', 'nolock']
+    }
+  end
+
   # host definition
   hosts.each_with_index do |host, index|
     config.vm.define host[:hostname] do |node|

http://git-wip-us.apache.org/repos/asf/metron/blob/53124d97/metron-deployment/development/ubuntu14/Vagrantfile
----------------------------------------------------------------------
diff --git a/metron-deployment/development/ubuntu14/Vagrantfile b/metron-deployment/development/ubuntu14/Vagrantfile
index 01c0d17..cfa3cdf 100644
--- a/metron-deployment/development/ubuntu14/Vagrantfile
+++ b/metron-deployment/development/ubuntu14/Vagrantfile
@@ -60,6 +60,17 @@ Vagrant.configure(2) do |config|
   config.hostmanager.enabled = true
   config.hostmanager.manage_host = true
 
+  # enable vagrant cachier if present
+  if Vagrant.has_plugin?("vagrant-cachier")
+    config.cache.enable :apt
+    config.cache.scope = :box
+
+    config.cache.synced_folder_opts = {
+      type: :nfs,
+      mount_options: ['rw', 'vers=3', 'tcp', 'nolock']
+    }
+  end
+
   # host definition
   hosts.each_with_index do |host, index|
     config.vm.define host[:hostname] do |node|


[06/50] [abbrv] metron git commit: METRON-1493 Unhelpful Error Message When Assignment Expressions Fail (nickwallen) closes apache/metron#966

Posted by rm...@apache.org.
METRON-1493 Unhelpful Error Message When Assignment Expressions Fail (nickwallen) closes apache/metron#966


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/52dd9fb8
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/52dd9fb8
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/52dd9fb8

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 52dd9fb852bef5998dac83109ac6e122860be489
Parents: 9c5d9d7
Author: nickwallen <ni...@nickallen.org>
Authored: Fri Mar 16 10:16:07 2018 -0400
Committer: nickallen <ni...@apache.org>
Committed: Fri Mar 16 10:16:07 2018 -0400

----------------------------------------------------------------------
 .../common/shell/specials/AssignmentCommand.java      |  2 +-
 .../common/shell/specials/AssignmentCommandTest.java  | 14 ++++++++++++++
 2 files changed, 15 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/52dd9fb8/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/shell/specials/AssignmentCommand.java
----------------------------------------------------------------------
diff --git a/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/shell/specials/AssignmentCommand.java b/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/shell/specials/AssignmentCommand.java
index e253b3b..664e01e 100644
--- a/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/shell/specials/AssignmentCommand.java
+++ b/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/shell/specials/AssignmentCommand.java
@@ -79,7 +79,7 @@ public class AssignmentCommand implements SpecialCommand {
       return result;
 
     } else {
-      return error("Assignment expression failed");
+      return result;
     }
   }
 }
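
The one-line fix above returns the failed `result` itself, which carries the underlying exception, rather than collapsing it into the generic "Assignment expression failed" message. A sketch of why that matters is below; the `Result` class here is an illustrative stand-in for Stellar's result type, not its actual API:

```java
import java.util.Optional;

public class ResultPropagationDemo {

  // Minimal stand-in for a result that is either a value or an error.
  static final class Result {
    final Object value;
    final Optional<Throwable> exception;
    Result(Object value, Optional<Throwable> exception) {
      this.value = value;
      this.exception = exception;
    }
    boolean isError() { return exception.isPresent(); }
  }

  static Result evaluate(String expression) {
    try {
      int denominator = 0;
      return new Result(1 / denominator, Optional.empty()); // fails like `x := 0/0`
    } catch (Exception e) {
      // Keep the root-cause exception instead of replacing it with generic text.
      return new Result(null, Optional.of(e));
    }
  }

  public static void main(String[] args) {
    Result result = evaluate("x := 0/0");
    if (result.isError()) {
      // Prints "java.lang.ArithmeticException: / by zero", the detail
      // the old generic message hid from the user.
      System.out.println(result.exception.get());
    }
  }
}
```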

http://git-wip-us.apache.org/repos/asf/metron/blob/52dd9fb8/metron-stellar/stellar-common/src/test/java/org/apache/metron/stellar/common/shell/specials/AssignmentCommandTest.java
----------------------------------------------------------------------
diff --git a/metron-stellar/stellar-common/src/test/java/org/apache/metron/stellar/common/shell/specials/AssignmentCommandTest.java b/metron-stellar/stellar-common/src/test/java/org/apache/metron/stellar/common/shell/specials/AssignmentCommandTest.java
index 899effb..1b5c9d5 100644
--- a/metron-stellar/stellar-common/src/test/java/org/apache/metron/stellar/common/shell/specials/AssignmentCommandTest.java
+++ b/metron-stellar/stellar-common/src/test/java/org/apache/metron/stellar/common/shell/specials/AssignmentCommandTest.java
@@ -149,6 +149,20 @@ public class AssignmentCommandTest {
     assertFalse(executor.getState().containsKey("x"));
   }
 
+  /**
+   * If an assignment expression fails, the error message should explain
+   * why the expression fails.
+   */
+  @Test
+  public void testErrorMessageWhenAssignmentFails() {
+    StellarResult result = command.execute("x := 0/0", executor);
+
+    // validate the result
+    assertTrue(result.isError());
+    assertTrue(result.getException().isPresent());
+    assertEquals(ArithmeticException.class, result.getException().get().getClass());
+  }
+
   @Test
   public void testAssignNull() {
     StellarResult result = command.execute("x := NULL", executor);


[42/50] [abbrv] metron git commit: METRON-1502 Upgrade Doxia plugin to 1.8 (justinleet) closes apache/metron#974

Posted by rm...@apache.org.
METRON-1502 Upgrade Doxia plugin to 1.8 (justinleet) closes apache/metron#974


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/daf543b1
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/daf543b1
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/daf543b1

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: daf543b10423c2cfc2b740e39647ef4eb3863872
Parents: 08252f5
Author: justinleet <ju...@gmail.com>
Authored: Tue Apr 17 13:32:23 2018 -0400
Committer: leet <le...@apache.org>
Committed: Tue Apr 17 13:32:23 2018 -0400

----------------------------------------------------------------------
 metron-deployment/amazon-ec2/README.md          |  88 +++++-----
 metron-deployment/packaging/ambari/README.md    | 168 +++++++++----------
 .../packaging/packer-build/README.md            |   2 +-
 metron-interface/metron-rest/README.md          |  56 +++----
 metron-platform/metron-enrichment/README.md     |   2 +-
 metron-sensors/pycapa/README.md                 |  84 +++++-----
 site-book/pom.xml                               |   4 +-
 .../src-resources/templates/site.xml.template   |   6 +-
 8 files changed, 206 insertions(+), 204 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/daf543b1/metron-deployment/amazon-ec2/README.md
----------------------------------------------------------------------
diff --git a/metron-deployment/amazon-ec2/README.md b/metron-deployment/amazon-ec2/README.md
index 73a3d70..b2efc9e 100644
--- a/metron-deployment/amazon-ec2/README.md
+++ b/metron-deployment/amazon-ec2/README.md
@@ -46,39 +46,39 @@ Any platform that supports these tools is suitable, but the following instructio
 
 1. Install Homebrew by running the following command in a terminal.  Refer to the  [Homebrew](http://brew.sh/) home page for the latest installation instructions.
 
-  ```
-  /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
-  ```
+    ```
+    /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
+    ```
 
 2. With Homebrew installed, run the following command in a terminal to install all of the required tools and dependencies.
 
-  ```
-  brew update
-  brew tap caskroom/versions
-  brew cask install java8 vagrant virtualbox
-  brew install maven git node
-  ```
+    ```
+    brew update
+    brew tap caskroom/versions
+    brew cask install java8 vagrant virtualbox
+    brew install maven git node
+    ```
 
 3. Install Ansible by following the instructions [here](http://docs.ansible.com/ansible/intro_installation.html#latest-releases-via-pip).
 
 4. Ensure that a public SSH key is located at `~/.ssh/id_rsa.pub`.
 
-  ```
-  $ cat ~/.ssh/id_rsa.pub
-  ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQChv5GJxPjR39UJV7VY17ivbLVlxFrH7UHwh1Jsjem4d1eYiAtde5N2y65/HRNxWbhYli9ED8k0/MRP92ejewucEbrPNq5mytPqdC4IvZ98Ln2GbqTDwvlP3T7xa/wYFOpFsOmXXql8216wSrnrS4f3XK7ze34S6/VmY+lsBYnr3dzyj8sG/mexpJgFS/w83mWJV0e/ryf4Hd7P6DZ5fO+nmTXfKNK22ga4ctcnbZ+toYcPL+ODCh8598XCKVo97XjwF5OxN3vl1p1HHguo3cHB4H1OIaqX5mUt59gFIZcAXUME89PO6NUiZDd3RTstpf125nQVkQAHu2fvW96/f037 nick@localhost
-  ```
+    ```
+    $ cat ~/.ssh/id_rsa.pub
+    ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQChv5GJxPjR39UJV7VY17ivbLVlxFrH7UHwh1Jsjem4d1eYiAtde5N2y65/HRNxWbhYli9ED8k0/MRP92ejewucEbrPNq5mytPqdC4IvZ98Ln2GbqTDwvlP3T7xa/wYFOpFsOmXXql8216wSrnrS4f3XK7ze34S6/VmY+lsBYnr3dzyj8sG/mexpJgFS/w83mWJV0e/ryf4Hd7P6DZ5fO+nmTXfKNK22ga4ctcnbZ+toYcPL+ODCh8598XCKVo97XjwF5OxN3vl1p1HHguo3cHB4H1OIaqX5mUt59gFIZcAXUME89PO6NUiZDd3RTstpf125nQVkQAHu2fvW96/f037 nick@localhost
+    ```
 
-  If this file does not exist, run the following command at a terminal and accept all defaults.  Only the public key, not the private key, will be uploaded to Amazon and configured on each host to enable SSH connectivity.  While it is possible to create and use an alternative key those details will not be covered.  
+    If this file does not exist, run the following command at a terminal and accept all defaults.  Only the public key, not the private key, will be uploaded to Amazon and configured on each host to enable SSH connectivity.  While it is possible to create and use an alternative key, those details will not be covered here.
 
-  ```
-  ssh-keygen -t rsa
-  ```
+    ```
+    ssh-keygen -t rsa
+    ```
 
 5. Ensure the JAVA_HOME environment variable is set
 
-   ```
-   export JAVA_HOME="/Library/Java/JavaVirtualMachines/jdk1.8.0_162.jdk/Contents/Home"
-   ```
+    ```
+    export JAVA_HOME="/Library/Java/JavaVirtualMachines/jdk1.8.0_162.jdk/Contents/Home"
+    ```
 
   Notice: You must replace the path with the path of your installed JDK version
 
@@ -103,20 +103,20 @@ Having successfully created your Amazon Web Services account, hopefully you will
 
 1. Use the Amazon access key by exporting its values via the shell's environment.  This allows Ansible to authenticate with Amazon EC2.  For example:
 
-  ```
-  export AWS_ACCESS_KEY_ID="AKIAI6NRFEO27E5FFELQ"
-  export AWS_SECRET_ACCESS_KEY="vTDydWJQnAer7OWauUS150i+9Np7hfCXrrVVP6ed"
-  ```
+    ```
+    export AWS_ACCESS_KEY_ID="AKIAI6NRFEO27E5FFELQ"
+    export AWS_SECRET_ACCESS_KEY="vTDydWJQnAer7OWauUS150i+9Np7hfCXrrVVP6ed"
+    ```
 
   Notice: You must replace the access key values above with values from your own access key.
 
 2. Start the Apache Metron deployment process.  When prompted provide a unique name for your Metron environment or accept the default.  
 
-  ```
-  $ ./run.sh
-  Metron Environment [metron-test]: my-metron-env
-  ...
-  ```
+    ```
+    $ ./run.sh
+    Metron Environment [metron-test]: my-metron-env
+    ...
+    ```
 
  The process is likely to take between 70 and 90 minutes.  Fortunately, everything is fully automated and you should feel free to grab a coffee.
 
@@ -124,24 +124,24 @@ Having successfully created your Amazon Web Services account, hopefully you will
 
 1. After the deployment has completed successfully, a message like the following will be displayed.  Navigate to the specified resources to explore your newly minted Apache Metron environment.
 
-  ```
-  TASK [debug] *******************************************************************
-  ok: [localhost] => {
-      "Success": [
-          "Apache Metron deployed successfully",
-          "   Metron  @  http://ec2-52-37-255-142.us-west-2.compute.amazonaws.com:5000",
-          "   Ambari  @  http://ec2-52-37-225-202.us-west-2.compute.amazonaws.com:8080",
-          "   Sensors @  ec2-52-37-225-202.us-west-2.compute.amazonaws.com on tap0",
-          "For additional information, see https://metron.apache.org/'"
-      ]
-  }
-  ```
+    ```
+    TASK [debug] *******************************************************************
+    ok: [localhost] => {
+        "Success": [
+            "Apache Metron deployed successfully",
+            "   Metron  @  http://ec2-52-37-255-142.us-west-2.compute.amazonaws.com:5000",
+            "   Ambari  @  http://ec2-52-37-225-202.us-west-2.compute.amazonaws.com:8080",
+            "   Sensors @  ec2-52-37-225-202.us-west-2.compute.amazonaws.com on tap0",
+            "For additional information, see https://metron.apache.org/'"
+        ]
+    }
+    ```
 
 2. Each of the provisioned hosts will be accessible from the internet. Connecting to one over SSH as the user `centos` will not require a password as it will authenticate with the pre-defined SSH key.  
 
-  ```
-  ssh centos@ec2-52-91-215-174.compute-1.amazonaws.com
-  ```
+    ```
+    ssh centos@ec2-52-91-215-174.compute-1.amazonaws.com
+    ```
 
 Advanced Usage
 --------------

http://git-wip-us.apache.org/repos/asf/metron/blob/daf543b1/metron-deployment/packaging/ambari/README.md
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/README.md b/metron-deployment/packaging/ambari/README.md
index 52c7570..410c403 100644
--- a/metron-deployment/packaging/ambari/README.md
+++ b/metron-deployment/packaging/ambari/README.md
@@ -107,14 +107,14 @@ by Ambari:
 
 ## Adding a new property
 1. Add the property to the appropriate `*-env.xml` file found in `METRON.CURRENT/configuration`.
-  ```
+    ```
     <property>
         <name>new_property</name>
         <description>New Property description</description>
         <value>Default Value</value>
         <display-name>New Property Pretty Name</display-name>
     </property>
-  ```
+    ```
 The appropriate `*-env.xml` file should be selected based on which component depends on the property. This allows Ambari to accurately restart only the affected components when the property is changed. If a property is in `metron-env.xml`, Ambari will prompt you to restart all Metron components.
 
 2. Add the property to the `metron_theme.json` file found in `METRON.CURRENT/themes` if the property was added to a component-specific `*-env.xml` file (`metron-parsers-env.xml` for example) and not `metron-env.xml`.
@@ -122,28 +122,28 @@ This is necessary for the property to be displayed in the correct tab of the Met
 
 3. Reference the property in `METRON.CURRENT/package/scripts/params/params_linux.py`, unless it will be used in Ambari's status command. It will be stored in a variable. The name doesn't have to match, but it's preferred that it does.
 Make sure to replace `metron-env` with the correct `*-env` file, as noted above.
-  ```
-  new_property = config['configurations']['metron-env']['new_property']
-  ```
-If this property will be used in the status command, instead make this change in `METRON.CURRENT/package/scripts/params/status_params.py`.
-Afterwards, in `params_linux.py`, reference the new property:
-  ```
-  new_property = status_params.new_property
-  ```
-This behavior is because Ambari doesn't send all parameters to the status, so it needs to be explicitly provided. Also note that status_params.py parameters are not automatically pulled into the params_linux.py namespace, so we explicitly choose the variables to include.
+    ```
+    new_property = config['configurations']['metron-env']['new_property']
+    ```
+  If this property will be used in the status command, instead make this change in `METRON.CURRENT/package/scripts/params/status_params.py`.
+  Afterwards, in `params_linux.py`, reference the new property:
+    ```
+    new_property = status_params.new_property
+    ```
+  This behavior is because Ambari doesn't send all parameters to the status, so it needs to be explicitly provided. Also note that status_params.py parameters are not automatically pulled into the params_linux.py namespace, so we explicitly choose the variables to include.
  See https://docs.python.org/2/howto/doanddont.html#at-module-level for more info.
 
 4. Ambari master services can then import the params:
 
-  ```
-  from params import params
-  env.set_params(params)
-  ```
+    ```
+    from params import params
+    env.set_params(params)
+    ```
 
 5. The `*_commands.py` files receive the params as an input from the master services. Once this is done, they can be accessed via the variable we set above:
-  ```
-  self.__params.new_property
-  ```
+    ```
+    self.__params.new_property
+    ```
 
 
 ### Env file property walkthrough
@@ -391,9 +391,9 @@ The steps to update, for anything affecting an Ambari agent node, e.g. setup scr
 1. Edit the file(s) with your changes. The ambari-agent file must be edited, but generally better to update both for consistency.
 1. Restart the Ambari Agent to get the cache to pick up the modified file
 
-  ```
-  ambari-agent restart
-  ```
+    ```
+    ambari-agent restart
+    ```
 1. Start Metron through Ambari if it was stopped.
 
 ### Reinstalling the mpack
@@ -402,18 +402,18 @@ After we've modified files in Ambari and the mpack is working, it is a good idea
 1. Stop Metron through Ambari and remove the Metron service
 1. Rebuild the mpack on your local machine and deploy it to Vagrant, ensuring that all changes made directly to files in Ambari were also made in your local environment
 
-  ```
-  cd metron-deployment
-  mvn clean package
-  scp packaging/ambari/metron-mpack/target/metron_mpack-0.4.0.0.tar.gz root@node1:~
-  ```
+    ```
+    cd metron-deployment
+    mvn clean package
+    scp packaging/ambari/metron-mpack/target/metron_mpack-0.4.0.0.tar.gz root@node1:~
+    ```
 1. Log in to Vagrant, deploy the mpack and restart Ambari
 
-  ```
-  ssh root@node1
-  ambari-server install-mpack --mpack=metron_mpack-0.4.0.0.tar.gz --verbose --force
-  ambari-server restart
-  ```
+    ```
+    ssh root@node1
+    ambari-server install-mpack --mpack=metron_mpack-0.4.0.0.tar.gz --verbose --force
+    ambari-server restart
+    ```
 1. Install the mpack through Ambari as you normally would
 
 1. The same steps can be followed for Elasticsearch and Kibana by similarly deploying the ES MPack located in elasticsearch-mpack/target.
@@ -454,21 +454,21 @@ The `security_enabled` param is already made available, along with appropriate k
 * Write scripts to be idempotent. The pattern currently used is to write a file out when a task is finished, e.g. setting up ACLs or tables.
 For example, when indexing is configured, a file is written out and checked based on a property.
 
-  ```
-  def set_configured(self):
-      File(self.__params.indexing_configured_flag_file,
-           content="",
-           owner=self.__params.metron_user,
-           mode=0755)
-  ```
-This is checked in the indexing master
-
-  ```
-  if not commands.is_configured():
-      commands.init_kafka_topics()
-      commands.init_hdfs_dir()
-      commands.set_configured()
-  ```
+    ```
+    def set_configured(self):
+        File(self.__params.indexing_configured_flag_file,
+             content="",
+             owner=self.__params.metron_user,
+             mode=0755)
+    ```
+  This is checked in the indexing master
+
+    ```
+    if not commands.is_configured():
+        commands.init_kafka_topics()
+        commands.init_hdfs_dir()
+        commands.set_configured()
+    ```
 
 * Ensure ACLs are properly managed. This includes Kafka and HBase. Often this involves a config file written out as above because this isn't idempotent!
   * Make sure to `kinit` as the correct user for setting up ACLs in a secured cluster. This is usually kafka for Kafka and hbase for HBase.
@@ -515,22 +515,22 @@ The main steps for upgrading a service are split into add-on and common services
 
 1. Update metainfo.xml
 
-   Change the version number and package name in `metron/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/${YOUR_VERSION_NUMBER_HERE}/metainfo.xml`, e.g.
-
-   ```
-   <version>5.6.2</version>
-   ...
-   <osSpecifics>
-       <osSpecific>
-           <osFamily>any</osFamily>
-           <packages>
-               <package>
-                   <name>elasticsearch-5.6.2</name>
-               </package>
-           </packages>
-       </osSpecific>
-   </osSpecifics>
-   ```
+    Change the version number and package name in `metron/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/${YOUR_VERSION_NUMBER_HERE}/metainfo.xml`, e.g.
+
+    ```
+    <version>5.6.2</version>
+    ...
+    <osSpecifics>
+        <osSpecific>
+            <osFamily>any</osFamily>
+            <packages>
+                <package>
+                    <name>elasticsearch-5.6.2</name>
+                </package>
+            </packages>
+        </osSpecific>
+    </osSpecifics>
+    ```
 
 #### Update Add-on Services
 
@@ -560,14 +560,14 @@ The main steps for upgrading a service are split into add-on and common services
 
 1. Update metainfo.xml
 
-   Change the version number in `metron/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/ELASTICSEARCH/${YOUR_VERSION_NUMBER_HERE}/metainfo.xml`.
-   Also make sure to update the "extends" version to point to the updated common-services version, e.g.
+  Change the version number in `metron/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/ELASTICSEARCH/${YOUR_VERSION_NUMBER_HERE}/metainfo.xml`.
+  Also make sure to update the "extends" version to point to the updated common-services version, e.g.
 
-   ```
-   <name>ELASTICSEARCH</name>
-   <version>5.6.2</version>
-   <extends>common-services/ELASTICSEARCH/5.6.2</extends>
-   ```
+    ```
+    <name>ELASTICSEARCH</name>
+    <version>5.6.2</version>
+    <extends>common-services/ELASTICSEARCH/5.6.2</extends>
+    ```
 
 #### Update mpack.json
 
@@ -610,16 +610,16 @@ The main steps for upgrading a service are split into add-on and common services
 
    Change the version number and package name in `metron/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/${YOUR_VERSION_NUMBER_HERE}/metainfo.xml`, e.g.
 
-   ```
-   <version>5.6.2</version>
-   ...
-   <packages>
-       ...
-       <package>
-           <name>kibana-5.6.2</name>
-       </package>
-   </packages>
-   ```
+    ```
+    <version>5.6.2</version>
+    ...
+    <packages>
+        ...
+        <package>
+            <name>kibana-5.6.2</name>
+        </package>
+    </packages>
+    ```
 
 #### Update Add-on Services
 
@@ -659,11 +659,11 @@ The main steps for upgrading a service are split into add-on and common services
 
    Change the version number in `metron/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/KIBANA/${YOUR_VERSION_NUMBER_HERE}/metainfo.xml`.
    Also make sure to update the "extends" version to point to the updated common-services version, e.g.
-   ```
-   <name>KIBANA</name>
-   <version>5.6.2</version>
-   <extends>common-services/KIBANA/5.6.2</extends>
-   ```
+    ```
+    <name>KIBANA</name>
+    <version>5.6.2</version>
+    <extends>common-services/KIBANA/5.6.2</extends>
+    ```
 
 #### Update mpack.json
 

http://git-wip-us.apache.org/repos/asf/metron/blob/daf543b1/metron-deployment/packaging/packer-build/README.md
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/packer-build/README.md b/metron-deployment/packaging/packer-build/README.md
index 8cb5ff5..ee05299 100644
--- a/metron-deployment/packaging/packer-build/README.md
+++ b/metron-deployment/packaging/packer-build/README.md
@@ -40,7 +40,7 @@ Build Single Images
 ----------------------
  Navigate to *your-project-directory*/metron-deployment/packer-build
  * Base Centos (full-dev)
- ```
+```
 bin/bento build base-centos-6.7.json
 ```
 

http://git-wip-us.apache.org/repos/asf/metron/blob/daf543b1/metron-interface/metron-rest/README.md
----------------------------------------------------------------------
diff --git a/metron-interface/metron-rest/README.md b/metron-interface/metron-rest/README.md
index 08cd6be..c928d8f 100644
--- a/metron-interface/metron-rest/README.md
+++ b/metron-interface/metron-rest/README.md
@@ -815,19 +815,19 @@ Request and Response objects are JSON formatted.  The JSON schemas are available
       * sensorType - The sensor type
       * patch - An array of [RFC 6902](https://tools.ietf.org/html/rfc6902) patches.
     * Example adding a field called `project` with value `metron` to the `bro` message with UUID of `000-000-0000` :
-  ```
-  {
-     "guid" : "000-000-0000",
-     "sensorType" : "bro",
-     "patch" : [
-      {
-                "op": "add"
-               , "path": "/project"
-               , "value": "metron"
-      }
-              ]
-   }
-  ```
+        ```
+        {
+           "guid" : "000-000-0000",
+           "sensorType" : "bro",
+           "patch" : [
+            {
+                      "op": "add"
+                     , "path": "/project"
+                     , "value": "metron"
+            }
+                    ]
+         }
+        ```
   * Returns:
     * 200 - nothing
     * 404 - document not found
@@ -839,21 +839,21 @@ Request and Response objects are JSON formatted.  The JSON schemas are available
       * guid - The Patch UUID
       * sensorType - The sensor type
       * replacement - A Map representing the replaced document
-    * Example replacing a `bro` message with guid of `000-000-0000`
-```
-   {
-     "guid" : "000-000-0000",
-     "sensorType" : "bro",
-     "replacement" : {
-       "source:type": "bro",
-       "guid" : "bro_index_2017.01.01.01:1",
-       "ip_src_addr":"192.168.1.2",
-       "ip_src_port": 8009,
-       "timestamp":200,
-       "rejected":false
-      }
-   }
-```
+    * Example replacing a `bro` message with guid of `000-000-0000` :
+        ```
+        {
+          "guid" : "000-000-0000",
+          "sensorType" : "bro",
+          "replacement" : {
+            "source:type": "bro",
+            "guid" : "bro_index_2017.01.01.01:1",
+            "ip_src_addr":"192.168.1.2",
+            "ip_src_port": 8009,
+            "timestamp":200,
+            "rejected":false
+          }
+        }
+        ```
   * Returns:
     * 200 - Current user
 

http://git-wip-us.apache.org/repos/asf/metron/blob/daf543b1/metron-platform/metron-enrichment/README.md
----------------------------------------------------------------------
diff --git a/metron-platform/metron-enrichment/README.md b/metron-platform/metron-enrichment/README.md
index aa6fc99..cbf8ee8 100644
--- a/metron-platform/metron-enrichment/README.md
+++ b/metron-platform/metron-enrichment/README.md
@@ -364,7 +364,7 @@ Let's adjust the configurations for the Squid topology to annotate the messages
 
 * Edit the squid enrichment configuration at `$METRON_HOME/config/zookeeper/enrichments/squid.json` (this file will not exist, so create a new one) to add some new fields based on stellar queries: 
 
- ```
+```
 {
   "enrichment" : {
     "fieldMap": {

http://git-wip-us.apache.org/repos/asf/metron/blob/daf543b1/metron-sensors/pycapa/README.md
----------------------------------------------------------------------
diff --git a/metron-sensors/pycapa/README.md b/metron-sensors/pycapa/README.md
index ab4389f..7e688a8 100644
--- a/metron-sensors/pycapa/README.md
+++ b/metron-sensors/pycapa/README.md
@@ -46,20 +46,20 @@ General notes on the installation of Pycapa.
 
 1. Install system dependencies including the core development tools, Python libraries and header files, and Libpcap libraries and header files.  On CentOS 7+, you can install these requirements with the following command.
 
-   ```
-   yum -y install "@Development tools" python-devel libpcap-devel
-   ```
+    ```
+    yum -y install "@Development tools" python-devel libpcap-devel
+    ```
 
 1. Install Librdkafka at your chosen $PREFIX.
 
-   ```
-   export PREFIX=/usr
-   wget https://github.com/edenhill/librdkafka/archive/v0.9.4.tar.gz  -O - | tar -xz
-   cd librdkafka-0.9.4/
-   ./configure --prefix=$PREFIX
-   make
-   make install
-   ```
+    ```
+    export PREFIX=/usr
+    wget https://github.com/edenhill/librdkafka/archive/v0.9.4.tar.gz  -O - | tar -xz
+    cd librdkafka-0.9.4/
+    ./configure --prefix=$PREFIX
+    make
+    make install
+    ```
 
 1. Add Librdkafka to the dynamic library load path.
 
@@ -186,24 +186,24 @@ $ pycapa --producer \
 
 Consume 10 packets and create a libpcap-compliant pcap file.
 
-  ```
-  $ pycapa --consumer \
-      --kafka-broker localhost:9092 \
-      --kafka-topic pcap \
-      --max-packets 10 \
-      > out.pcap
-  $ tshark -r out.pcap
-      1   0.000000 199.193.204.147 → 192.168.0.3  TLSv1.2 151 Application Data
-      2   0.000005 199.193.204.147 → 192.168.0.3  TLSv1.2 1191 Application Data
-      3   0.000088  192.168.0.3 → 199.193.204.147 TCP 66 54788 → 443 [ACK] Seq=1 Ack=86 Win=4093 Len=0 TSval=961284465 TSecr=943744612
-      4   0.000089  192.168.0.3 → 199.193.204.147 TCP 66 54788 → 443 [ACK] Seq=1 Ack=1211 Win=4058 Len=0 TSval=961284465 TSecr=943744612
-      5   0.948788  192.168.0.3 → 192.30.253.125 TCP 54 54671 → 443 [ACK] Seq=1 Ack=1 Win=4096 Len=0
-      6   1.005175 192.30.253.125 → 192.168.0.3  TCP 66 [TCP ACKed unseen segment] 443 → 54671 [ACK] Seq=1 Ack=2 Win=31 Len=0 TSval=2658544467 TSecr=961240339
-      7   1.636312 fe80::1286:8cff:fe0e:65df → ff02::1      ICMPv6 134 Router Advertisement from 10:86:8c:0e:65:df
-      8   2.253052 192.175.27.112 → 192.168.0.3  TLSv1.2 928 Application Data
-      9   2.253140  192.168.0.3 → 192.175.27.112 TCP 66 55078 → 443 [ACK] Seq=1 Ack=863 Win=4069 Len=0 TSval=961286699 TSecr=967172238
-     10   2.494769  192.168.0.3 → 224.0.0.251  MDNS 82 Standard query 0x0000 PTR _googlecast._tcp.local, "QM" question
-  ```
+    ```
+    $ pycapa --consumer \
+        --kafka-broker localhost:9092 \
+        --kafka-topic pcap \
+        --max-packets 10 \
+        > out.pcap
+    $ tshark -r out.pcap
+        1   0.000000 199.193.204.147 → 192.168.0.3  TLSv1.2 151 Application Data
+        2   0.000005 199.193.204.147 → 192.168.0.3  TLSv1.2 1191 Application Data
+        3   0.000088  192.168.0.3 → 199.193.204.147 TCP 66 54788 → 443 [ACK] Seq=1 Ack=86 Win=4093 Len=0 TSval=961284465 TSecr=943744612
+        4   0.000089  192.168.0.3 → 199.193.204.147 TCP 66 54788 → 443 [ACK] Seq=1 Ack=1211 Win=4058 Len=0 TSval=961284465 TSecr=943744612
+        5   0.948788  192.168.0.3 → 192.30.253.125 TCP 54 54671 → 443 [ACK] Seq=1 Ack=1 Win=4096 Len=0
+        6   1.005175 192.30.253.125 → 192.168.0.3  TCP 66 [TCP ACKed unseen segment] 443 → 54671 [ACK] Seq=1 Ack=2 Win=31 Len=0 TSval=2658544467 TSecr=961240339
+        7   1.636312 fe80::1286:8cff:fe0e:65df → ff02::1      ICMPv6 134 Router Advertisement from 10:86:8c:0e:65:df
+        8   2.253052 192.175.27.112 → 192.168.0.3  TLSv1.2 928 Application Data
+        9   2.253140  192.168.0.3 → 192.175.27.112 TCP 66 55078 → 443 [ACK] Seq=1 Ack=863 Win=4069 Len=0 TSval=961286699 TSecr=967172238
+       10   2.494769  192.168.0.3 → 224.0.0.251  MDNS 82 Standard query 0x0000 PTR _googlecast._tcp.local, "QM" question
+    ```
 
 #### Example 5
 
@@ -280,19 +280,19 @@ The probe can be used in a Kerberized environment.  Follow these additional step
   * `sasl.kerberos.keytab`
   * `sasl.kerberos.principal`
 
-  ```
-  $ pycapa --producer \
-      --interface eth0 \
-      --kafka-broker kafka1:6667 \
-      --kafka-topic pcap --max-packets 10 \
-      -X security.protocol=SASL_PLAINTEXT \
-      -X sasl.kerberos.keytab=/etc/security/keytabs/metron.headless.keytab \
-      -X sasl.kerberos.principal=metron-metron@METRONEXAMPLE.COM
-  INFO:root:Connecting to Kafka; {'sasl.kerberos.principal': 'metron-metron@METRONEXAMPLE.COM', 'group.id': 'ORNLVWJZZUAA', 'security.protocol': 'SASL_PLAINTEXT', 'sasl.kerberos.keytab': '/etc/security/keytabs/metron.headless.keytab', 'bootstrap.servers': 'kafka1:6667'}
-  INFO:root:Starting packet capture
-  INFO:root:Waiting for '1' message(s) to flush
-  INFO:root:'10' packet(s) in, '10' packet(s) out
-  ```
+        ```
+        $ pycapa --producer \
+            --interface eth0 \
+            --kafka-broker kafka1:6667 \
+            --kafka-topic pcap --max-packets 10 \
+            -X security.protocol=SASL_PLAINTEXT \
+            -X sasl.kerberos.keytab=/etc/security/keytabs/metron.headless.keytab \
+            -X sasl.kerberos.principal=metron-metron@METRONEXAMPLE.COM
+        INFO:root:Connecting to Kafka; {'sasl.kerberos.principal': 'metron-metron@METRONEXAMPLE.COM', 'group.id': 'ORNLVWJZZUAA', 'security.protocol': 'SASL_PLAINTEXT', 'sasl.kerberos.keytab': '/etc/security/keytabs/metron.headless.keytab', 'bootstrap.servers': 'kafka1:6667'}
+        INFO:root:Starting packet capture
+        INFO:root:Waiting for '1' message(s) to flush
+        INFO:root:'10' packet(s) in, '10' packet(s) out
+        ```
 
 FAQs
 ====

http://git-wip-us.apache.org/repos/asf/metron/blob/daf543b1/site-book/pom.xml
----------------------------------------------------------------------
diff --git a/site-book/pom.xml b/site-book/pom.xml
index 3731c4a..1408542 100644
--- a/site-book/pom.xml
+++ b/site-book/pom.xml
@@ -71,12 +71,12 @@
             <plugin>
               <groupId>org.apache.maven.plugins</groupId>
               <artifactId>maven-site-plugin</artifactId>
-              <version>3.4</version>
+              <version>3.7</version>
               <dependencies>
 		<dependency>
                   <groupId>org.apache.maven.doxia</groupId>
                   <artifactId>doxia-module-markdown</artifactId>
-                  <version>1.6</version>
+                  <version>1.8</version>
 		</dependency>
               </dependencies>
               <executions>

http://git-wip-us.apache.org/repos/asf/metron/blob/daf543b1/site-book/src/site/src-resources/templates/site.xml.template
----------------------------------------------------------------------
diff --git a/site-book/src/site/src-resources/templates/site.xml.template b/site-book/src/site/src-resources/templates/site.xml.template
index 8523562..0ddf2cc 100644
--- a/site-book/src/site/src-resources/templates/site.xml.template
+++ b/site-book/src/site/src-resources/templates/site.xml.template
@@ -23,7 +23,7 @@
     <skin>
         <groupId>org.apache.maven.skins</groupId>
         <artifactId>maven-fluido-skin</artifactId>
-        <version>1.3.0</version>
+        <version>1.7</version>
     </skin>
 
     <custom>
@@ -47,9 +47,11 @@
 
     <body>
         <head>
+          <![CDATA[
             <script type="text/javascript">
-                $( document ).ready( function() { $( '.carousel' ).carousel( { interval: 3500 } ) } );
+              $( document ).ready( function() { $( '.carousel' ).carousel( { interval: 3500 } ) } );
             </script>
+          ]]>
         </head>
 
         <breadcrumbs position="left">


[43/50] [abbrv] metron git commit: METRON-1445: Update performance tuning guide with more explicit parameter instructions (mmiklavc via mmiklavc) closes apache/metron#988

Posted by rm...@apache.org.
METRON-1445: Update performance tuning guide with more explicit parameter instructions (mmiklavc via mmiklavc) closes apache/metron#988


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/e0949142
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/e0949142
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/e0949142

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: e0949142dd682a84e59fea09066de8024f106f13
Parents: daf543b
Author: mmiklavc <mi...@gmail.com>
Authored: Tue Apr 17 12:31:37 2018 -0600
Committer: Michael Miklavcic <mi...@gmail.com>
Committed: Tue Apr 17 12:31:37 2018 -0600

----------------------------------------------------------------------
 metron-platform/Performance-tuning-guide.md     | 244 +++++++++++-
 metron-platform/metron-common/README.md         |  32 ++
 .../src/main/scripts/cluster_info.py            | 389 +++++++++++++++++++
 3 files changed, 659 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/e0949142/metron-platform/Performance-tuning-guide.md
----------------------------------------------------------------------
diff --git a/metron-platform/Performance-tuning-guide.md b/metron-platform/Performance-tuning-guide.md
index 7d79ace..e2d1ae2 100644
--- a/metron-platform/Performance-tuning-guide.md
+++ b/metron-platform/Performance-tuning-guide.md
@@ -17,6 +17,14 @@ limitations under the License.
 -->
 # Metron Performance Tuning Guide
 
+- [Overview](#overview)
+- [General Tuning Suggestions](#general-tuning-suggestions)
+- [Component Tuning Levers](#component-tuning-levers)
+- [Use Case Specific Tuning Suggestions](#use-case-specific-tuning-suggestions)
+- [Debugging](#debugging)
+- [Issues](#issues)
+- [Reference](#reference)
+
 ## Overview
 
 This document provides guidance from our experiences tuning the Apache Metron Storm topologies for maximum performance. You'll find
@@ -31,20 +39,33 @@ pipe, and the majority of these options assist in tweaking the various pipe widt
 
 ## General Tuning Suggestions
 
+### Storm Executors vs. Tasks
+
 Note that there is currently no method for specifying the number of tasks from the number of executors in Flux topologies (enrichment,
  indexing). By default, the number of tasks will equal the number of executors. Logically, setting the number of tasks equal to the number
 of executors is sensible. Storm enforces num executors <= num tasks. The reason you might set the number of tasks higher than the number of
 executors is for future performance tuning and rebalancing without the need to bring down your topologies. The number of tasks is fixed
 at topology startup time whereas the number of executors can be increased up to a maximum value equal to the number of tasks.
 
-When configuring Storm Kafka spouts, we found that the default values for poll.timeout.ms, offset.commit.period.ms, and max.uncommitted.offsets worked well in nearly all cases.
-As a general rule, it was optimal to set spout parallelism equal to the number of partitions used in your Kafka topic. Any greater
+### Kafka Spout Configuration
+
+When configuring Storm Kafka spouts, we found that the default values for
+
+- `poll.timeout.ms`
+- `offset.commit.period.ms`
+- `max.uncommitted.offsets`
+
+worked well in nearly all cases. As a general rule, it was optimal to set spout parallelism equal to the number of partitions used in your Kafka topic. Any greater
 parallelism will leave you with idle consumers since Kafka limits the max number of consumers to the number of partitions. This is
 important because Kafka has certain ordering guarantees for message delivery per partition that would not be possible if more than
 one consumer in a given consumer group were able to read from that partition.
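
For illustration only, here is how the equivalent knobs look when building a spout with the storm-kafka-client API directly. The broker address, topic name, and values are placeholders, and Metron itself wires these settings through Flux files and CLI options rather than Java code like this; the setter names follow the `setPollTimeoutMs`/`setMaxUncommittedOffsets` methods referenced in the Flux property table later in this guide:

```java
import org.apache.storm.kafka.spout.KafkaSpout;
import org.apache.storm.kafka.spout.KafkaSpoutConfig;
import org.apache.storm.topology.TopologyBuilder;

public class SpoutTuningSketch {
  public static void main(String[] args) {
    KafkaSpoutConfig<String, String> spoutConfig =
        KafkaSpoutConfig.builder("broker1:6667", "enrichments")
            .setPollTimeoutMs(200)              // poll.timeout.ms
            .setOffsetCommitPeriodMs(30000)     // offset.commit.period.ms
            .setMaxUncommittedOffsets(10000000) // max.uncommitted.offsets
            .build();

    int numPartitions = 4; // match the Kafka topic's partition count
    TopologyBuilder builder = new TopologyBuilder();
    // One spout executor per partition; extra executors would sit idle.
    builder.setSpout("kafkaSpout", new KafkaSpout<>(spoutConfig), numPartitions);
  }
}
```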
 
 ## Component Tuning Levers
 
+### High Level Overview
+
+There are a number of levers that can be set while tuning a Metron cluster. The main services to interact with for performance tuning are: Kafka, Storm, HDFS, and indexing (Elasticsearch or Solr). For each service, here is a high level breakdown of the major knobs and levers that can be modified while tuning your cluster.
+
 - Kafka
     - Number partitions
 - Storm
@@ -70,12 +91,15 @@ for more details.
 
 ### Storm Tuning
 
+#### Overview
+
 There are quite a few options you will be confronted with when tuning your Storm topologies and this is largely trial and error. As a general rule of thumb,
 we recommend starting with the defaults and smaller numbers in terms of parallelism while iteratively working up until the desired performance is achieved.
 You will find the offset lag tool indispensable while verifying your settings.
 
 We won't go into a full discussion about Storm's architecture - see references section for more info - but there are some general rules of thumb that should be
 followed. It's first important to understand the ways you can impact parallelism in a Storm topology.
+
 - num tasks
 - num executors (parallelism hint)
 - num workers
@@ -83,10 +107,10 @@ followed. It's first important to understand the ways you can impact parallelism
 Tasks are instances of a given spout or bolt, executors are threads in a process, and workers are jvm processes. You'll want the number of tasks as a multiple of the number of executors,
 the number of executors as multiple of the number of workers, and the number of workers as a multiple of the number of machines. The main reason for this approach is
  that it will give a uniform distribution of work to each machine and jvm process. More often than not, your number of tasks will be equal to the number of executors, which
- is the default in Storm. Flux does not actually provide a way to independently set number of tasks, so for enrichments and indexing which use Flux, num tasks will always equal
+ is the default in Storm. Flux does not actually provide a way to independently set number of tasks, so for enrichments and indexing, which use Flux, num tasks will always equal
  num executors.
 
-You can change the number of workers via the property `topology.workers`
+You can change the number of workers via the Storm property `topology.workers`
 
 __Other Storm Settings__
 
@@ -96,12 +120,15 @@ topology.max.spout.pending
 This is the maximum number of tuples that can be in flight (ie, not yet acked) at any given time within your topology. You set this as a form of backpressure to ensure
 you don't flood your topology.
 
+
 ```
 topology.acker.executors
 ```
+
 This specifies how many threads should be dedicated to tuple acking. We found that setting this equal to the number of partitions in your inbound Kafka topic worked well.
 
 __spout-config.json__
+
 ```
 {
     ...
@@ -111,15 +138,146 @@ __spout-config.json__
 }
 ```
 
-These are the spout recommended defaults from Storm and are currently the defaults provided in the Kafka spout itself. In fact, if you find the recommended defaults work fine for you,
+Above is a snippet for configuring parsers. These are the recommended spout defaults from Storm and are currently the defaults provided in the Kafka spout itself. In fact, if you find the recommended defaults work fine for you,
 then you can omit these settings altogether.
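
To make these levers concrete, here is a sketch of setting them through Storm's Java `Config` and `TopologyBuilder` APIs. The bolt, component names, and numbers are placeholders, and in Metron these values arrive via Ambari, Flux, or the parser CLI as described in the next section rather than hand-written Java:

```java
import org.apache.storm.Config;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Tuple;

public class TopologyTuningSketch {

  // Placeholder bolt so the sketch compiles on its own.
  static class NoopBolt extends BaseBasicBolt {
    @Override
    public void execute(Tuple input, BasicOutputCollector collector) { }
    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) { }
  }

  public static void main(String[] args) {
    TopologyBuilder builder = new TopologyBuilder();
    // 8 executors now; tasks are fixed at 16, so a later `storm rebalance`
    // can raise executors as high as 16 without redeploying the topology.
    builder.setBolt("enrichmentBolt", new NoopBolt(), 8).setNumTasks(16);

    Config conf = new Config();
    conf.setNumWorkers(4);         // topology.workers: spread executors over 4 JVMs
    conf.setMaxSpoutPending(2000); // topology.max.spout.pending: backpressure cap
    conf.setNumAckers(4);          // topology.acker.executors: e.g. one per partition
  }
}
```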
 
+#### Where to Find Tuning Properties
+
+**Important:** The parser topologies are deployed via a builder pattern that takes parameters from the CLI as set via Ambari. The enrichment and indexing topologies are configured
+using a Storm Flux file, a configuration properties file, and Ambari. Here is a setting materialization summary for each of the topology types:
+
+- Parsers
+	- Management UI -> parser json config and CLI -> Storm
+- Enrichment
+	- Ambari UI -> properties file -> Flux -> Storm
+- Indexing
+	- Ambari UI -> properties file -> Flux -> Storm
+
+**Parsers**
+
+This is a mapping of the various performance tuning properties for parsers and how they are materialized.
+
+See more detail on starting parsers [here](https://github.com/apache/metron/blob/master/metron-platform/metron-parsers/README.md#starting-the-parser-topology)
+
+| Category                    | Management UI Property Name                | JSON Config File Property Name     | CLI Option                                                                                     | Storm Property Name             |  Notes                                                                        |
+|-----------------------------|--------------------------------------------|------------------------------------|------------------------------------------------------------------------------------------------|---------------------------------|-------------------------------------------------------------------------------|
+| Storm topology config       | Num Workers                                | n/a                                | -nw,--num_workers <NUM_WORKERS>                                                                | topology.workers                |                                                                               |
+|                             | Num Ackers                                 | n/a                                | -na,--num_ackers <NUM_ACKERS>                                                                  | topology.acker.executors        |                                                                               |
+|                             | Storm Config                               | topology.max.spout.pending         | -e,--extra_topology_options <JSON_FILE>, e.g. { "topology.max.spout.pending" : NUM }           | topology.max.spout.pending      | Put property in JSON format in a file named `storm-<MY_PARSER>-config.json`   |
+| Kafka spout                 | Spout Parallelism                          | n/a                                | -sp,--spout_p <SPOUT_PARALLELISM_HINT>                                                         | n/a                             |                                                                               |
+|                             | Spout Num Tasks                            | n/a                                | -snt,--spout_num_tasks <NUM_TASKS>                                                             | n/a                             |                                                                               |
+|                             | Spout Config                               | spout.pollTimeoutMs                | -esc,--extra_kafka_spout_config <JSON_FILE>, e.g. { "spout.pollTimeoutMs" : 200 }              | n/a                             | Put property in JSON format in a file named `spout-<MY_PARSER>-config.json`   |
+|                             | Spout Config                               | spout.maxUncommittedOffsets        | -esc,--extra_kafka_spout_config <JSON_FILE>, e.g. { "spout.maxUncommittedOffsets" : 10000000 } | n/a                             | Put property in JSON format in a file named `spout-<MY_PARSER>-config.json`   |
+|                             | Spout Config                               | spout.offsetCommitPeriodMs         | -esc,--extra_kafka_spout_config <JSON_FILE>, e.g. { "spout.offsetCommitPeriodMs" : 30000 }     | n/a                             | Put property in JSON format in a file named `spout-<MY_PARSER>-config.json`   |
+| Parser bolt                 | Parser Num Tasks                           | n/a                                | -pnt,--parser_num_tasks <NUM_TASKS>                                                            | n/a                             |                                                                               |
+|                             | Parser Parallelism                         | n/a                                | -pp,--parser_p <PARALLELISM_HINT>                                                              | n/a                             |                                                                               |
+
+**Enrichment**
+
+This is a mapping of the various performance tuning properties for enrichments and how they are materialized.
+
+Flux file can be found here - $METRON_HOME/flux/enrichment/remote.yaml
+
+_Note 1:_ Changes to Flux file properties that are managed by Ambari will render Ambari unable to further manage the property.
+
+_Note 2:_ Many of these settings will be irrelevant in the alternate non-split-join topology.
+
+| Category                    | Ambari Property Name                       | enrichment.properties property                         | Flux Property                                          | Flux Section Location               | Storm Property Name             | Notes                                  |
+|-----------------------------|--------------------------------------------|--------------------------------------------------------|--------------------------------------------------------|-------------------------------------|---------------------------------|----------------------------------------|
+| Storm topology config       | enrichment_workers                         | enrichment.workers                                     | topology.workers                                       | line 18, config                     | topology.workers                |                                        |
+|                             | enrichment_acker_executors                 | enrichment.acker.executors                             | topology.acker.executors                               | line 18, config                     | topology.acker.executors        |                                        |
+|                             | enrichment_topology_max_spout_pending      | topology.max.spout.pending                             | topology.max.spout.pending                             | line 18, config                     | topology.max.spout.pending      |                                        |
+| Kafka spout                 | enrichment_kafka_spout_parallelism         | kafka.spout.parallelism                                | parallelism                                            | line 245, id: kafkaSpout            | n/a                             |                                        |
+|                             | n/a                                        | session.timeout.ms                                     | session.timeout.ms                                     | line 201, id: kafkaProps            | n/a                             | Kafka consumer client property         |
+|                             | n/a                                        | enable.auto.commit                                     | enable.auto.commit                                     | line 201, id: kafkaProps            | n/a                             | Kafka consumer client property         |
+|                             | n/a                                        | n/a                                                    | setPollTimeoutMs                                       | line 230, id: kafkaConfig           | n/a                             | Kafka consumer client property         |
+|                             | n/a                                        | n/a                                                    | setMaxUncommittedOffsets                               | line 230, id: kafkaConfig           | n/a                             | Kafka consumer client property         |
+|                             | n/a                                        | n/a                                                    | setOffsetCommitPeriodMs                                | line 230, id: kafkaConfig           | n/a                             | Kafka consumer client property         |
+| Enrichment splitter         | enrichment_split_parallelism               | enrichment.split.parallelism                           | parallelism                                            | line 253, id: enrichmentSplitBolt   | n/a                             |                                        |
+| Enrichment joiner           | enrichment_join_parallelism                | enrichment.join.parallelism                            | parallelism                                            | line 316, id: enrichmentJoinBolt    | n/a                             |                                        |
+| Threat intel splitter       | threat_intel_split_parallelism             | threat.intel.split.parallelism                         | parallelism                                            | line 338, id: threatIntelSplitBolt  | n/a                             |                                        |
+| Threat intel joiner         | threat_intel_join_parallelism              | threat.intel.join.parallelism                          | parallelism                                            | line 376, id: threatIntelJoinBolt   | n/a                             |                                        |
+| Output bolt                 | kafka_writer_parallelism                   | kafka.writer.parallelism                               | parallelism                                            | line 397, id: outputBolt            | n/a                             |                                        |
+
+There are three ways to add Kafka spout properties:
+
+1. Ambari: If they are properties managed by Ambari (noted in the table under 'Ambari Property Name'), look for the setting in Ambari.
+
+1. Flux -> kafkaProps: add a new key/value to the kafkaProps section HashMap on line 201. For example, if you want to set the Kafka Spout consumer's session.timeout.ms to 30 seconds, you would add the following:
+
+    ```
+           -   name: "put"
+               args:
+                   - "session.timeout.ms"
+                   - 30000
+    ```
+
+1. Flux -> kafkaConfig: add a new setter to the kafkaConfig section on line 230. For example, if you want to set the Kafka Spout consumer's poll timeout to 200 milliseconds, you would add the following under `configMethods`:
+
+    ```
+             -   name: "setPollTimeoutMs"
+                 args:
+                     - 200
+    ```
+
+**Indexing (Batch)**
+
+This is a mapping of the various performance tuning properties for indexing and how they are materialized.
+
+Flux file can be found here - $METRON_HOME/flux/indexing/batch/remote.yaml.
+
+_Note_: Changes to Flux file properties that are managed by Ambari will render Ambari unable to further manage the property.
+
+| Category                    | Ambari Property Name                       | hdfs.properties property                               | Flux Property                                          | Flux Section Location               | Storm Property Name             | Notes                                  |
+|-----------------------------|--------------------------------------------|--------------------------------------------------------|--------------------------------------------------------|-------------------------------------|---------------------------------|----------------------------------------|
+| Storm topology config       | enrichment_workers                         | enrichment.workers                                     | topology.workers                                       | line 19, config                     | topology.workers                |                                        |
+|                             | enrichment_acker_executors                 | enrichment.acker.executors                             | topology.acker.executors                               | line 19, config                     | topology.acker.executors        |                                        |
+|                             | enrichment_topology_max_spout_pending      | topology.max.spout.pending                             | topology.max.spout.pending                             | line 19, config                     | topology.max.spout.pending      |                                        |
+| Kafka spout                 | batch_indexing_kafka_spout_parallelism     | kafka.spout.parallelism                                | parallelism                                            | line 123, id: kafkaSpout            | n/a                             |                                        |
+|                             | n/a                                        | session.timeout.ms                                     | session.timeout.ms                                     | line 80, id: kafkaProps             | n/a                             | Kafka consumer client property         |
+|                             | n/a                                        | enable.auto.commit                                     | enable.auto.commit                                     | line 80, id: kafkaProps             | n/a                             | Kafka consumer client property         |
+|                             | n/a                                        | n/a                                                    | setPollTimeoutMs                                       | line 108, id: kafkaConfig           | n/a                             | Kafka consumer client property         |
+|                             | n/a                                        | n/a                                                    | setMaxUncommittedOffsets                               | line 108, id: kafkaConfig           | n/a                             | Kafka consumer client property         |
+|                             | n/a                                        | n/a                                                    | setOffsetCommitPeriodMs                                | line 108, id: kafkaConfig           | n/a                             | Kafka consumer client property         |
+| Output bolt                 | hdfs_writer_parallelism                    | hdfs.writer.parallelism                                | parallelism                                            | line 133, id: hdfsIndexingBolt      | n/a                             |                                        |
+|                             | n/a                                        | n/a                                                    | hdfsSyncPolicy (see notes below)                       | line 47, id: hdfsWriter             | n/a                             | See notes below about adding this prop |
+|                             | bolt_hdfs_rotation_policy_units            | bolt.hdfs.rotation.policy.units                        | constructorArgs                                        | line 41, id: hdfsRotationPolicy     | n/a                             |                                        |
+|                             | bolt_hdfs_rotation_policy_count            | bolt.hdfs.rotation.policy.count                        | constructorArgs                                        | line 41, id: hdfsRotationPolicy     | n/a                             |                                        |
+
+_Note_: HDFS sync policy is not currently managed via Ambari. You will need to modify the Flux file directly to accommodate this setting, e.g.
+
+Add a new setter to the hdfsWriter around line 56. Lines 53-55 are provided for context.
+
+```
+ 53             -   name: "withRotationPolicy"
+ 54                 args:
+ 55                     - ref: "hdfsRotationPolicy"
+ 56             -   name: "withSyncPolicy"
+ 57                 args:
+ 58                     - ref: "hdfsSyncPolicy"
+```
+
+Add an hdfsSyncPolicy after the hdfsRotationPolicy that appears on line 41, e.g.
+
+```
+ 41     -   id: "hdfsRotationPolicy"
+...
+ 45           - "${bolt.hdfs.rotation.policy.units}"
+ 46
+ 47     -   id: "hdfsSyncPolicy"
+ 48         className: "org.apache.storm.hdfs.bolt.sync.CountSyncPolicy"
+ 49         constructorArgs:
+ 50           -  100000
+```
+
 ## Use Case Specific Tuning Suggestions
 
 The below discussion outlines a specific tuning exercise we went through for driving 1 Gbps of traffic through a Metron cluster running with 4 Kafka brokers and 4
 Storm Supervisors.
 
 General machine specs
+
 - 10 Gb network cards
 - 256 GB memory
 - 12 disks
@@ -174,6 +332,7 @@ ${KAFKA_HOME}/bin/kafka-consumer-groups.sh \
 
 This will return a table with the following output depicting offsets for all partitions and consumers associated with the specified
 consumer group:
+
 ```
 GROUP                          TOPIC              PARTITION  CURRENT-OFFSET  LOG-END-OFFSET  LAG             OWNER
 enrichments                    enrichments        9          29746066        29746067        1               consumer-2_/xxx.xxx.xxx.xxx
@@ -212,6 +371,7 @@ We started with a single partition for the inbound Kafka topics and eventually w
 The default is 'null', which would result in no limit.
 
 __storm-bro.config__
+
 ```
 {
     ...
@@ -223,6 +383,7 @@ __storm-bro.config__
 And the following default spout settings. Again, this can be omitted entirely since we are using the defaults.
 
 __spout-bro.config__
+
 ```
 {
     ...
@@ -252,6 +413,7 @@ though you could certainly do so if necessary. Notice that we only needed 1 work
 ```
 
 From the usage docs, here are the options we've used. The full reference can be found [here](../metron-platform/metron-parsers/README.md#Starting_the_Parser_Topology).
+
 ```
 usage: start_parser_topology.sh
  -e,--extra_topology_options <JSON_FILE>               Extra options in the form
@@ -290,6 +452,7 @@ Note that the main Metron-specific option we've changed to accomodate the desire
 More information on Flux can be found here - http://storm.apache.org/releases/1.0.1/flux.html
 
 __General storm settings__
+
 ```
 topology.workers: 8
 topology.acker.executors: 48
@@ -297,6 +460,7 @@ topology.max.spout.pending: 2000
 ```
 
 __Spout and Bolt Settings__
+
 ```
 kafkaSpout
     parallelism=48
@@ -341,6 +505,7 @@ cat ${METRON_HOME}/config/zookeeper/indexing/bro.json
 And here are the settings we used for the indexing topology
 
 __General storm settings__
+
 ```
 topology.workers: 4
 topology.acker.executors: 24
@@ -348,6 +513,7 @@ topology.max.spout.pending: 2000
 ```
 
 __Spout and Bolt Settings__
+
 ```
 hdfsSyncPolicy
     org.apache.storm.hdfs.bolt.sync.CountSyncPolicy
@@ -372,12 +538,14 @@ PCAP is a specialized topology that is a Spout-only topology. Both Kafka topic c
 avoid the additional network hop required if using an additional bolt.
 
 __General Storm topology properties__
+
 ```
 topology.workers=16
 topology.acker.executors: 0
 ```
 
 __Spout and Bolt properties__
+
 ```
 kafkaSpout
     parallelism: 128
@@ -403,6 +571,69 @@ writerConfig
         dfs.blocksize=1073741824
 ```
 
+## Debugging
+
+Set the following env vars accordingly for your cluster. This is how we would configure it for the Metron full dev development environment.
+
+```
+export HDP_HOME=/usr/hdp/current
+export KAFKA_HOME=$HDP_HOME/kafka-broker
+export STORM_UI=http://node1:8744
+export ELASTIC=http://node1:9200
+export ZOOKEEPER=node1:2181
+export METRON_VERSION=0.4.3
+export METRON_HOME=/usr/metron/${METRON_VERSION}
+```
+
+Note that the output from Storm will be a flattened blob of JSON. In order to pretty-print it for readability, you can pipe it through a JSON formatter, e.g.
+
+```
+[some Storm curl command] | python -m json.tool
+```
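+
+For example, pretty-printing the cluster summary request shown below:
+
+```
+curl -XGET ${STORM_UI}'/api/v1/cluster/summary' | python -m json.tool
+```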
+
+**Getting Storm Configuration Details**
+
+Storm has a useful REST API you can use to get full details about your running topologies. This is generally more convenient and complete for troubleshooting performance problems than going to the Storm UI alone. See Storm's [REST API docs](http://storm.apache.org/releases/1.1.0/STORM-UI-REST-API.html) for more details.
+
+```
+# get Storm cluster summary info including version
+curl -XGET ${STORM_UI}'/api/v1/cluster/summary'
+```
+
+```
+# get overall Storm cluster configuration
+curl -XGET ${STORM_UI}'/api/v1/cluster/configuration'
+```
+
+```
+# get list of topologies and brief summary detail
+curl -XGET ${STORM_UI}'/api/v1/topology/summary'
+```
+
+```
+# get all topology runtime settings. Plug in the ID for your topology, which you can get from the topology summary command or from the Storm UI. Passing sys=1 will also return system stats.
+curl -XGET ${STORM_UI}'/api/v1/topology/:id?sys=1'
+```
+
+**Getting Kafka Configuration Details**
+
+```
+# Get list of Kafka topics
+${HDP_HOME}/kafka-broker/bin/kafka-topics.sh --zookeeper $ZOOKEEPER --list
+```
+
+```
+# Get Kafka topic details - plug in the desired topic name in place of "enrichments"
+${HDP_HOME}/kafka-broker/bin/kafka-topics.sh --zookeeper $ZOOKEEPER --topic enrichments --describe
+```
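+
+Consumer lag is often the most useful Kafka detail when diagnosing a backed-up topology. A sketch, assuming the `enrichments` consumer group and Zookeeper-based offset storage (exact flags vary by Kafka version):
+
+```
+# Get current offsets and lag for a consumer group - plug in the desired group in place of "enrichments"
+${HDP_HOME}/kafka-broker/bin/kafka-consumer-groups.sh --zookeeper $ZOOKEEPER --describe --group enrichments
+```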
+
+**Getting Metron Topology Zookeeper Configuration**
+
+```
+# Provides a full listing of all Metron parser, enrichment, and indexing topology configurations
+$METRON_HOME/bin/zk_load_configs.sh -m DUMP -z $ZOOKEEPER
+```
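+
+If you only need one sensor's configuration, the dump can typically be narrowed; a sketch, assuming your version of the script supports the config type and name options:
+
+```
+# Dump only the bro parser configuration
+$METRON_HOME/bin/zk_load_configs.sh -m DUMP -z $ZOOKEEPER -c PARSER -n bro
+```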
+
 ## Issues
 
 __Error__
@@ -423,11 +654,12 @@ modifying the options outlined above, increasing the poll timeout, or both.
 ## Reference
 
 * [Enrichment Performance](metron-enrichment/Performance.md)
-* http://storm.apache.org/releases/1.0.1/flux.html
+* http://storm.apache.org/releases/1.1.0/flux.html
 * https://stackoverflow.com/questions/17257448/what-is-the-task-in-storm-parallelism
 * http://storm.apache.org/releases/current/Understanding-the-parallelism-of-a-Storm-topology.html
 * http://www.malinga.me/reading-and-understanding-the-storm-ui-storm-ui-explained/
 * https://www.confluent.io/blog/how-to-choose-the-number-of-topicspartitions-in-a-kafka-cluster/
 * https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.6.1/bk_storm-component-guide/content/storm-kafkaspout-perf.html
+* http://storm.apache.org/releases/1.1.0/STORM-UI-REST-API.html
 
 

http://git-wip-us.apache.org/repos/asf/metron/blob/e0949142/metron-platform/metron-common/README.md
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/README.md b/metron-platform/metron-common/README.md
index ab90a66..b25fbc8 100644
--- a/metron-platform/metron-common/README.md
+++ b/metron-platform/metron-common/README.md
@@ -23,6 +23,7 @@ limitations under the License.
 * [Management Utility](#management-utility)
 * [Topology Errors](topology-errors)
 * [Performance Logging](#performance-logging)
+* [Metron Debugging](#metron-debugging)
 
 # Stellar Language
 
@@ -400,3 +401,34 @@ __Side Effects__
 Calling the mark() method multiple times simply resets the start time to the current nano time. Calling log() with a non-existent mark name will log 0 ns elapsed time with a warning indicating that log has been invoked for a mark name that does not exist.
 The class is not thread-safe and makes no attempt at keeping multiple threads from modifying the same markers.
 
+# Metron Debugging
+
+A Python script is provided for gathering information useful in debugging your Metron cluster. Run it from the node that has Metron installed. All options listed below are required.
+
+_Note:_ Be aware that no anonymization/scrubbing is performed on the captured configuration details.
+
+```
+# $METRON_HOME/bin/cluster_info.py -h
+Usage: cluster_info.py [options]
+
+Options:
+  -h, --help            show this help message and exit
+  -a HOST:PORT, --ambari-host=HOST:PORT
+                        Connect to Ambari via the supplied host:port
+  -c NAME, --cluster-name=NAME
+                        Name of cluster in Ambari to retrieve info for
+  -o DIRECTORY, --out-dir=DIRECTORY
+                        Write debugging data to specified root directory
+  -s HOST:PORT, --storm-host=HOST:PORT
+                        Connect to Storm via the supplied host:port
+  -b HOST1:PORT,HOST2:PORT, --broker_list=HOST1:PORT,HOST2:PORT
+                        Connect to Kafka via the supplied comma-delimited
+                        host:port list
+  -z HOST1:PORT,HOST2:PORT, --zookeeper_quorum=HOST1:PORT,HOST2:PORT
+                        Connect to Zookeeper via the supplied comma-delimited
+                        host:port quorum list
+  -m DIRECTORY, --metron_home=DIRECTORY
+                        Metron home directory
+  -p DIRECTORY, --hdp_home=DIRECTORY
+                        HDP home directory
+```
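+
+As an example, an invocation against the full dev environment might look like the following (the host:port values are the script defaults; adjust them and the output directory for your cluster):
+
+```
+$METRON_HOME/bin/cluster_info.py -a node1:8080 -c metron_cluster -o ~/metron-debug -s node1:8744 -b node1:6667 -z node1:2181 -m $METRON_HOME -p /usr/hdp/current
+```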

http://git-wip-us.apache.org/repos/asf/metron/blob/e0949142/metron-platform/metron-common/src/main/scripts/cluster_info.py
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/main/scripts/cluster_info.py b/metron-platform/metron-common/src/main/scripts/cluster_info.py
new file mode 100755
index 0000000..6e853c0
--- /dev/null
+++ b/metron-platform/metron-common/src/main/scripts/cluster_info.py
@@ -0,0 +1,389 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from optparse import OptionParser
+from requests.auth import HTTPBasicAuth
+from contextlib import closing
+import datetime
+import errno
+import getpass
+import json
+import os
+import os.path
+import requests
+import shutil
+import subprocess
+import sys
+import tarfile
+import zlib
+
+INDENT_SIZE = 2
+
+class UserPrompt(object):
+    
+    def __init__(self, prompt):
+        self.prompt = prompt
+
+    def get_hidden(self):
+        return getpass.getpass(self.prompt)
+
+class FileWriter(object):
+
+    def write(self, path, content):
+        print "Writing config to " + path
+        if not os.path.exists(os.path.dirname(path)):
+            try:
+                os.makedirs(os.path.dirname(path))
+            except OSError as exc: # Guard against race condition
+                if exc.errno != errno.EEXIST:
+                    raise
+        with open(path, 'w') as outfile:
+            outfile.write(content)
+        print "...done"
+
+class ShellHandler(object):
+
+    def __init__(self):
+        pass
+
+    # returns the exit code of the process call (subprocess.call returns the return code, not stdout)
+    def call(self, command):
+        try:
+            return subprocess.call(command)
+        except OSError as e:
+            print >> sys.stderr, "Execution failed:", e
+    
+    # partly hijacked from Python 2.7+ check_output for use in 2.6
+    def ret_output(self, cmd):
+        process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
+        output, unused_err = process.communicate()
+        retcode = process.poll()
+        if retcode:
+            raise subprocess.CalledProcessError(retcode, cmd, output=output)
+        return output
+
+class InfoGatherer(object):
+
+    def __init__(self, name):
+        self.name = name
+
+class AmbariInfo(InfoGatherer):
+
+    def __init__(self, host_info, cluster_name):
+        super(AmbariInfo, self).__init__('Ambari')
+        self.cluster_name = cluster_name
+        self.ambari_config_url = 'http://{0}/api/v1/clusters/{1}/configurations/service_config_versions'.format(host_info, cluster_name)
+        self.params_payload = { 'is_current' : 'true' }
+
+    def collect(self, out_dir):
+        print "Ambari request URL: " + self.ambari_config_url
+        ambari_user = UserPrompt('Ambari username: ').get_hidden()
+        ambari_pass = UserPrompt('Ambari password: ').get_hidden()
+        self.get_cluster_config(out_dir, ambari_user, ambari_pass)
+
+    def get_cluster_config(self, out_dir, ambari_user, ambari_pass):
+        # set encoding to 'identity' to keep Ambari from passing back gzipped content for large requests
+        headers = {
+                    'X-Requested-By' : 'ambari',
+                    'Authorization' : 'Basic',
+                    'Accept-Encoding': 'identity'
+                  }
+        # Retrieving Ambari config detail
+        response = requests.get(self.ambari_config_url, headers=headers, params=self.params_payload, stream=True, auth=HTTPBasicAuth(ambari_user, ambari_pass))
+        if response.status_code == 200:
+            file_name = 'ambari-cluster-config.json'
+            full_out_path = os.path.join(out_dir, self.name.lower(), file_name)
+            FileWriter().write(full_out_path, response.text)
+        else:
+            print "Request failed with status code: " + str(response.status_code)
+
+class StormInfo(InfoGatherer):
+
+    def __init__(self, host_info):
+        super(StormInfo, self).__init__('Storm')
+        url_base = 'http://{0}/api/v1'.format(host_info)
+        self.url_cluster_summary = url_base + '/cluster/summary'
+        self.url_cluster_configuration = url_base + '/cluster/configuration'
+        self.url_topology_summary = url_base + '/topology/summary'
+        self.url_topology_stats_summary = url_base + '/topology/{0}?sys=1'
+
+    def collect(self, out_dir):
+        self.get_cluster_summary(out_dir)
+        self.get_cluster_configuration(out_dir)
+        self.get_topology_summary(out_dir)
+        self.get_topology_stats_summary(out_dir)
+
+    def get_cluster_summary(self, out_dir):
+        response = requests.get(self.url_cluster_summary)
+        if response.status_code == 200:
+            file_name = 'cluster-summary.json'
+            full_out_path = os.path.join(out_dir, self.name.lower(), file_name)
+            FileWriter().write(full_out_path, json.dumps(response.json(), indent=INDENT_SIZE))
+        else:
+            print "Request failed with status code: " + str(response.status_code)
+
+    def get_cluster_configuration(self, out_dir):
+        response = requests.get(self.url_cluster_configuration)
+        if response.status_code == 200:
+            file_name = 'cluster-configuration.json'
+            full_out_path = os.path.join(out_dir, self.name.lower(), file_name)
+            FileWriter().write(full_out_path, json.dumps(response.json(), indent=INDENT_SIZE))
+        else:
+            print "Request failed with status code: " + str(response.status_code)
+
+    def get_topology_summary(self, out_dir):
+        response = requests.get(self.url_topology_summary)
+        if response.status_code == 200:
+            file_name = 'topology-summary.json'
+            full_out_path = os.path.join(out_dir, self.name.lower(), file_name)
+            FileWriter().write(full_out_path, json.dumps(response.json(), indent=INDENT_SIZE))
+        else:
+            print "Request failed with status code: " + str(response.status_code)
+
+    def get_topology_stats_summary(self, out_dir):
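+        # walk the topology summary response for each topology id, then fetch and save that topology's full stats (sys=1 includes system components)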
+        summary_response = requests.get(self.url_topology_summary)
+        if summary_response.status_code == 200:
+            for feature, value in summary_response.json().iteritems():
+                if feature == 'topologies':
+                    for topology in value:
+                        for k, v in topology.iteritems():
+                            if k == 'id':
+                                print "Retrieving Storm topology stats summary for topology-id " + v
+                                response = requests.get(self.url_topology_stats_summary.format(v))
+                                if response.status_code == 200:
+                                    file_name = 'topology-{0}-stats-summary.json'.format(v)
+                                    full_out_path = os.path.join(out_dir, self.name.lower(), 'stats-summaries', file_name)
+                                    FileWriter().write(full_out_path, json.dumps(response.json(), indent=INDENT_SIZE))
+                                else:
+                                    print "Request failed with status code: " + str(response.status_code)
+        else:
+            print "Topology listing request failed with status code: " + str(summary_response.status_code)
+
+class KafkaInfo(InfoGatherer):
+
+    def __init__(self, broker_list, zookeeper_quorum, hdp_home):
+        super(KafkaInfo, self).__init__('Kafka')
+        self.broker_list = broker_list
+        self.zookeeper_quorum = zookeeper_quorum
+        self.hdp_home = hdp_home
+        # note, need to escape the last single quote with the trim command so the string literal works
+        self.cmd_broker_id = '''{0}/kafka-broker/bin/zookeeper-shell.sh {1} <<< "ls /brokers/ids" | grep -e '\[.*\]' | tr -d [] | tr , ' \''''.format(self.hdp_home, self.zookeeper_quorum)
+        # broker id is dynamic and replaced later
+        self.cmd_broker_info = '''echo "get /brokers/ids/{0}" | {1}/kafka-broker/bin/zookeeper-shell.sh {2} 2>&1'''.format('{0}', self.hdp_home, self.zookeeper_quorum)
+        self.cmd_kafka_topics = '''{0}/kafka-broker/bin/kafka-topics.sh --zookeeper {1} --list'''.format(self.hdp_home, self.zookeeper_quorum)
+        self.cmd_topic_detail = '''{0}/kafka-broker/bin/kafka-topics.sh --zookeeper {1} --topic {2} --describe'''.format(self.hdp_home, self.zookeeper_quorum, '{0}')
+
+    def collect(self, out_dir):
+        print "Retrieving Kafka detail"
+        self.get_broker_info(out_dir)
+        self.get_kafka_topics(out_dir)
+        self.get_topic_detail(out_dir)
+
+    def get_broker_info(self, out_dir):
+        print "Retrieving Kafka broker info"
+        broker_ids = ShellHandler().ret_output(self.cmd_broker_id)
+        for broker in broker_ids.strip().split(','):
+            file_name = 'kafka-broker-{0}-info.txt'.format(broker)
+            full_out_path = os.path.join(out_dir, self.name.lower(), 'broker-info', file_name)
+            broker_data = ShellHandler().ret_output(self.cmd_broker_info.format(broker))
+            FileWriter().write(full_out_path, broker_data)
+
+    def get_kafka_topics(self, out_dir):
+        file_name = 'kafka-topics.txt'
+        full_out_path = os.path.join(out_dir, self.name.lower(), file_name)
+        topic_list = ShellHandler().ret_output(self.cmd_kafka_topics)
+        FileWriter().write(full_out_path, topic_list)
+
+    def get_topic_detail(self, out_dir):
+        file_name = 'kafka-enrichments-topic.txt'
+        full_out_path = os.path.join(out_dir, self.name.lower(), file_name)
+        enrichment_topic_detail = ShellHandler().ret_output(self.cmd_topic_detail.format('enrichments'))
+        FileWriter().write(full_out_path, enrichment_topic_detail)
+
+        file_name = 'kafka-indexing-topic.txt'
+        full_out_path = os.path.join(out_dir, self.name.lower(), file_name)
+        indexing_topic_detail = ShellHandler().ret_output(self.cmd_topic_detail.format('indexing'))
+        FileWriter().write(full_out_path, indexing_topic_detail)
+
+class MetronInfo(InfoGatherer):
+
+    def __init__(self, metron_home, zookeeper_quorum):
+        super(MetronInfo, self).__init__('Metron')
+        self.metron_home = metron_home
+        self.zookeeper_quorum = zookeeper_quorum
+        self.cmd_zk_load_configs = '''{0}/bin/zk_load_configs.sh -m DUMP -z {1}'''.format(self.metron_home, self.zookeeper_quorum)
+        self.cmd_metron_lib_list = '''ls -al {0}/lib'''.format(self.metron_home)
+
+    def collect(self, out_dir):
+        self.get_metron_config(out_dir)
+        self.get_metron_flux(out_dir)
+        self.get_metron_zk_config(out_dir)
+        self.get_lib_listing(out_dir)
+        self.get_rpm_listing(out_dir)
+    
+    def get_metron_config(self, out_dir):
+        print 'Copying ' + self.metron_home + '/config'
+        full_out_path = os.path.join(out_dir, self.name.lower(), 'config')
+        shutil.copytree(self.metron_home + '/config', full_out_path)
+
+    def get_metron_flux(self, out_dir):
+        print 'Copying ' + self.metron_home + '/flux'
+        full_out_path = os.path.join(out_dir, self.name.lower(), 'flux')
+        shutil.copytree(self.metron_home + '/flux', full_out_path)
+
+    def get_metron_zk_config(self, out_dir):
+        zk_config_dump = ShellHandler().ret_output(self.cmd_zk_load_configs)
+        full_out_path = os.path.join(out_dir, self.name.lower(), 'zk-configs.txt')
+        FileWriter().write(full_out_path, zk_config_dump)
+
+    def get_lib_listing(self, out_dir):
+        metron_lib_list = ShellHandler().ret_output(self.cmd_metron_lib_list)
+        full_out_path = os.path.join(out_dir, self.name.lower(), 'metron-libs-dir.txt')
+        FileWriter().write(full_out_path, metron_lib_list)
+
+    def get_rpm_listing(self, out_dir):
+        metron_rpm_list = ShellHandler().ret_output('''rpm -qa | grep 'metron\|elasticsearch\|kibana\'''')
+        full_out_path = os.path.join(out_dir, self.name.lower(), 'metron-rpm-list.txt')
+        FileWriter().write(full_out_path, metron_rpm_list)
+
+class HdpInfo(InfoGatherer):
+
+    def __init__(self, hdp_home):
+        super(HdpInfo, self).__init__('HDP')
+        self.hdp_home = hdp_home
+
+    def collect(self, out_dir):
+        hadoop_version_info = ShellHandler().ret_output('hadoop version')
+        full_out_path = os.path.join(out_dir, self.name.lower(), 'version-info.txt')
+        FileWriter().write(full_out_path, hadoop_version_info)
+
+class ClusterInfo:
+
+    def __init__(self):
+        pass
+
+    def main(self):
+        (options, args) = self.get_cli_args()
+        self.collect_data(options.out_dir,
+                          options.ambari_host,
+                          options.cluster_name,
+                          options.storm_host,
+                          options.broker_list,
+                          options.zookeeper_quorum,
+                          options.metron_home,
+                          options.hdp_home)
+
+    def get_cli_args(self):
+        parser = OptionParser()
+        parser.add_option("-a", "--ambari-host", 
+                          action="store",
+                          type="string",
+                          dest="ambari_host",
+                          help="Connect to Ambari via the supplied host:port",
+                          default="node1:8080",
+                          metavar="HOST:PORT")
+        parser.add_option("-c", "--cluster-name", 
+                          action="store",
+                          type="string",
+                          dest="cluster_name",
+                          help="Name of cluster in Ambari to retrieve info for",
+                          default="metron_cluster",
+                          metavar="NAME")
+        parser.add_option("-o", "--out-dir", 
+                          action="store",
+                          type="string",
+                          dest="out_dir",
+                          help="Write debugging data to specified root directory",
+                          default=".",
+                          metavar="DIRECTORY")
+        parser.add_option("-s", "--storm-host", 
+                          action="store",
+                          type="string",
+                          dest="storm_host",
+                          help="Connect to Storm via the supplied host:port",
+                          default="node1:8744",
+                          metavar="HOST:PORT")
+        parser.add_option("-b", "--broker_list", 
+                          action="store",
+                          type="string",
+                          dest="broker_list",
+                          help="Connect to Kafka via the supplied comma-delimited host:port list",
+                          default="node1:6667",
+                          metavar="HOST1:PORT,HOST2:PORT")
+        parser.add_option("-z", "--zookeeper_quorum", 
+                          action="store",
+                          type="string",
+                          dest="zookeeper_quorum",
+                          help="Connect to Zookeeper via the supplied comma-delimited host:port quorum list",
+                          default="node1:2181",
+                          metavar="HOST1:PORT,HOST2:PORT")
+        parser.add_option("-m", "--metron_home", 
+                          action="store",
+                          type="string",
+                          dest="metron_home",
+                          help="Metron home directory",
+                          default="/usr/metron/0.4.3",
+                          metavar="DIRECTORY")
+        parser.add_option("-p", "--hdp_home", 
+                          action="store",
+                          type="string",
+                          dest="hdp_home",
+                          help="HDP home directory",
+                          default="/usr/hdp/current",
+                          metavar="DIRECTORY")
+
+        return parser.parse_args()
+    
+    def collect_data(self, 
+                     out_dir_base,
+                     ambari_host,
+                     cluster_name,
+                     storm_host,
+                     broker_list,
+                     zookeeper_quorum,
+                     metron_home,
+                     hdp_home):
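+        # gather each subsystem's details into a timestamped output directory, then bundle everything into a tarball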
+        out_dir = self.get_out_dirname(out_dir_base)
+        info_getters = [
+                AmbariInfo(ambari_host, cluster_name),
+                StormInfo(storm_host),
+                KafkaInfo(broker_list, zookeeper_quorum, hdp_home),
+                MetronInfo(metron_home, zookeeper_quorum),
+                HdpInfo(hdp_home)
+        ]
+        for getter in info_getters:
+            getter.collect(out_dir)
+        self.compress_files(out_dir)
+        print "Finished gathering debug info"
+
+    # creates dir w/timestamp to drop all configs
+    # e.g. metron-debug-2018-03-24_06-50-34
+    def get_out_dirname(self, out_dir_base):
+        return os.path.join(out_dir_base, 'metron-debug-' + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
+
+    def compress_files(self, out_dir):
+        tarball_name = out_dir + '.tgz'
+        print "Creating tarfile bundle with all configs: '{0}'".format(tarball_name)
+        with closing(tarfile.open(tarball_name, 'w:gz')) as tar:
+            tar.add(out_dir, arcname=os.path.basename(out_dir))
+        print "...done"
+
+if __name__ == "__main__":
+    ClusterInfo().main()
+


[24/50] [abbrv] metron git commit: METRON-1462: Separate ES and Kibana from Metron Mpack (mmiklavc via mmiklavc) closes apache/metron#943

Posted by rm...@apache.org.
METRON-1462: Separate ES and Kibana from Metron Mpack (mmiklavc via mmiklavc) closes apache/metron#943


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/0ab39a32
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/0ab39a32
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/0ab39a32

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 0ab39a32b61104812a6a9e69e999e7053e3e7e90
Parents: 0d847cf
Author: mmiklavc <mi...@gmail.com>
Authored: Thu Apr 5 09:07:48 2018 -0600
Committer: Michael Miklavcic <mi...@gmail.com>
Committed: Thu Apr 5 09:07:48 2018 -0600

----------------------------------------------------------------------
 .../roles/ambari_master/defaults/main.yml       |   2 +
 .../ambari_master/tasks/elasticsearch_mpack.yml |  26 ++
 .../ansible/roles/ambari_master/tasks/main.yml  |   3 +-
 .../roles/ambari_master/tasks/metron_mpack.yml  |  26 ++
 .../ansible/roles/ambari_master/tasks/mpack.yml |  26 --
 .../roles/load_web_templates/tasks/main.yml     |   2 +-
 .../manual-install/Manual_Install_CentOS6.md    |   4 +-
 metron-deployment/packaging/ambari/README.md    |  25 +-
 .../ambari/elasticsearch-mpack/README.md        |  62 +++++
 .../ambari/elasticsearch-mpack/pom.xml          |  95 +++++++
 .../src/main/assemblies/elasticsearch-mpack.xml |  43 +++
 .../ELASTICSEARCH/5.6.2/metainfo.xml            |  29 ++
 .../ELASTICSEARCH/5.6.2/repos/repoinfo.xml      |  45 ++++
 .../addon-services/KIBANA/5.6.2/metainfo.xml    |  30 +++
 .../KIBANA/5.6.2/quicklinks/quicklinks.json     |  27 ++
 .../KIBANA/5.6.2/repos/repoinfo.xml             |  60 +++++
 .../5.6.2/configuration/elastic-env.xml         |  86 ++++++
 .../5.6.2/configuration/elastic-jvm-options.xml | 144 ++++++++++
 .../5.6.2/configuration/elastic-site.xml        | 198 ++++++++++++++
 .../5.6.2/configuration/elastic-sysconfig.xml   |  97 +++++++
 .../5.6.2/configuration/elastic-systemd.xml     |  30 +++
 .../ELASTICSEARCH/5.6.2/metainfo.xml            |  97 +++++++
 .../5.6.2/package/scripts/elastic_commands.py   | 266 +++++++++++++++++++
 .../5.6.2/package/scripts/elastic_master.py     |  72 +++++
 .../5.6.2/package/scripts/elastic_slave.py      |  71 +++++
 .../5.6.2/package/scripts/params.py             | 108 ++++++++
 .../5.6.2/package/scripts/properties_config.py  |  34 +++
 .../5.6.2/package/scripts/service_check.py      | 114 ++++++++
 .../5.6.2/package/scripts/status_params.py      |  27 ++
 .../templates/elasticsearch.master.yaml.j2      |  77 ++++++
 .../templates/elasticsearch.slave.yaml.j2       |  78 ++++++
 .../templates/elasticsearch_limits.conf.j2      |  20 ++
 .../5.6.2/quicklinks/quicklinks.json            |  43 +++
 .../ELASTICSEARCH/5.6.2/role_command_order.json |   8 +
 .../KIBANA/5.6.2/configuration/kibana-env.xml   |  72 +++++
 .../KIBANA/5.6.2/configuration/kibana-site.xml  | 113 ++++++++
 .../common-services/KIBANA/5.6.2/metainfo.xml   |  84 ++++++
 .../KIBANA/5.6.2/package/scripts/common.py      |  56 ++++
 .../5.6.2/package/scripts/kibana_master.py      |  81 ++++++
 .../KIBANA/5.6.2/package/scripts/params.py      |  50 ++++
 .../KIBANA/5.6.2/quicklinks/quicklinks.json     |  28 ++
 .../src/main/resources/mpack.json               |  76 ++++++
 .../packaging/ambari/metron-mpack/README.md     |  20 +-
 .../src/main/assemblies/metron-mpack.xml        |  14 -
 .../ELASTICSEARCH/5.6.2/metainfo.xml            |  29 --
 .../ELASTICSEARCH/5.6.2/repos/repoinfo.xml      |  45 ----
 .../addon-services/KIBANA/5.6.2/metainfo.xml    |  30 ---
 .../KIBANA/5.6.2/quicklinks/quicklinks.json     |  27 --
 .../KIBANA/5.6.2/repos/repoinfo.xml             |  60 -----
 .../5.6.2/configuration/elastic-env.xml         |  86 ------
 .../5.6.2/configuration/elastic-jvm-options.xml | 144 ----------
 .../5.6.2/configuration/elastic-site.xml        | 198 --------------
 .../5.6.2/configuration/elastic-sysconfig.xml   |  97 -------
 .../5.6.2/configuration/elastic-systemd.xml     |  30 ---
 .../ELASTICSEARCH/5.6.2/metainfo.xml            |  97 -------
 .../5.6.2/package/scripts/elastic_commands.py   | 266 -------------------
 .../5.6.2/package/scripts/elastic_master.py     |  72 -----
 .../5.6.2/package/scripts/elastic_slave.py      |  71 -----
 .../5.6.2/package/scripts/params.py             | 108 --------
 .../5.6.2/package/scripts/properties_config.py  |  34 ---
 .../5.6.2/package/scripts/service_check.py      | 114 --------
 .../5.6.2/package/scripts/status_params.py      |  27 --
 .../templates/elasticsearch.master.yaml.j2      |  77 ------
 .../templates/elasticsearch.slave.yaml.j2       |  78 ------
 .../templates/elasticsearch_limits.conf.j2      |  20 --
 .../5.6.2/quicklinks/quicklinks.json            |  43 ---
 .../ELASTICSEARCH/5.6.2/role_command_order.json |   8 -
 .../KIBANA/5.6.2/configuration/kibana-env.xml   |  72 -----
 .../KIBANA/5.6.2/configuration/kibana-site.xml  | 113 --------
 .../common-services/KIBANA/5.6.2/metainfo.xml   |  94 -------
 .../KIBANA/5.6.2/package/scripts/common.py      |  56 ----
 .../5.6.2/package/scripts/dashboard/__init__.py |  16 --
 .../scripts/dashboard/dashboard-bulkload.json   |  88 ------
 .../package/scripts/dashboard/dashboardindex.py |  95 -------
 .../package/scripts/dashboard/kibana.template   | 233 ----------------
 .../5.6.2/package/scripts/kibana_master.py      | 119 ---------
 .../KIBANA/5.6.2/package/scripts/params.py      |  50 ----
 .../KIBANA/5.6.2/quicklinks/quicklinks.json     |  28 --
 .../common-services/METRON/CURRENT/metainfo.xml |   9 +
 .../package/scripts/dashboard/__init__.py       |  16 ++
 .../scripts/dashboard/dashboard-bulkload.json   |  88 ++++++
 .../package/scripts/dashboard/dashboardindex.py |  95 +++++++
 .../package/scripts/dashboard/kibana.template   | 233 ++++++++++++++++
 .../CURRENT/package/scripts/indexing_master.py  |  32 +++
 metron-deployment/pom.xml                       |   1 +
 85 files changed, 3082 insertions(+), 2786 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/ansible/roles/ambari_master/defaults/main.yml
----------------------------------------------------------------------
diff --git a/metron-deployment/ansible/roles/ambari_master/defaults/main.yml b/metron-deployment/ansible/roles/ambari_master/defaults/main.yml
index 0719b7f..52d220a 100644
--- a/metron-deployment/ansible/roles/ambari_master/defaults/main.yml
+++ b/metron-deployment/ansible/roles/ambari_master/defaults/main.yml
@@ -19,3 +19,5 @@ ambari_server_mem: 2048
 ambari_mpack_version: 0.4.3.0
 metron_mpack_name: metron_mpack-{{ ambari_mpack_version }}.tar.gz
 metron_mpack_path: "{{ playbook_dir }}/../../packaging/ambari/metron-mpack/target/{{ metron_mpack_name }}"
+elasticsearch_mpack_name: elasticsearch_mpack-{{ ambari_mpack_version }}.tar.gz
+elasticsearch_mpack_path: "{{ playbook_dir }}/../../packaging/ambari/elasticsearch-mpack/target/{{ elasticsearch_mpack_name }}"

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/ansible/roles/ambari_master/tasks/elasticsearch_mpack.yml
----------------------------------------------------------------------
diff --git a/metron-deployment/ansible/roles/ambari_master/tasks/elasticsearch_mpack.yml b/metron-deployment/ansible/roles/ambari_master/tasks/elasticsearch_mpack.yml
new file mode 100644
index 0000000..7ce1a13
--- /dev/null
+++ b/metron-deployment/ansible/roles/ambari_master/tasks/elasticsearch_mpack.yml
@@ -0,0 +1,26 @@
+#
+#  Licensed to the Apache Software Foundation (ASF) under one or more
+#  contributor license agreements.  See the NOTICE file distributed with
+#  this work for additional information regarding copyright ownership.
+#  The ASF licenses this file to You under the Apache License, Version 2.0
+#  (the "License"); you may not use this file except in compliance with
+#  the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+---
+- name: Copy Elasticsearch MPack to Ambari Host
+  copy:
+    src: "{{ elasticsearch_mpack_path }}"
+    dest: /tmp
+
+- name: Install Elasticsearch MPack on Ambari Host
+  shell: ambari-server install-mpack --mpack=/tmp/elasticsearch_mpack-{{ ambari_mpack_version }}.tar.gz
+  args:
+    creates: /var/lib/ambari-server/resources/mpacks/elasticsearch-ambari.mpack-{{ ambari_mpack_version }}/addon-services

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/ansible/roles/ambari_master/tasks/main.yml
----------------------------------------------------------------------
diff --git a/metron-deployment/ansible/roles/ambari_master/tasks/main.yml b/metron-deployment/ansible/roles/ambari_master/tasks/main.yml
index 77a2e74..b6c351d 100644
--- a/metron-deployment/ansible/roles/ambari_master/tasks/main.yml
+++ b/metron-deployment/ansible/roles/ambari_master/tasks/main.yml
@@ -16,7 +16,8 @@
 #
 ---
 - include: ambari.yml
-- include: mpack.yml
+- include: metron_mpack.yml
+- include: elasticsearch_mpack.yml
 
 - name: start ambari server
   service:

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/ansible/roles/ambari_master/tasks/metron_mpack.yml
----------------------------------------------------------------------
diff --git a/metron-deployment/ansible/roles/ambari_master/tasks/metron_mpack.yml b/metron-deployment/ansible/roles/ambari_master/tasks/metron_mpack.yml
new file mode 100644
index 0000000..a44ea17
--- /dev/null
+++ b/metron-deployment/ansible/roles/ambari_master/tasks/metron_mpack.yml
@@ -0,0 +1,26 @@
+#
+#  Licensed to the Apache Software Foundation (ASF) under one or more
+#  contributor license agreements.  See the NOTICE file distributed with
+#  this work for additional information regarding copyright ownership.
+#  The ASF licenses this file to You under the Apache License, Version 2.0
+#  (the "License"); you may not use this file except in compliance with
+#  the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+---
+- name: Copy Metron MPack to Ambari Host
+  copy:
+    src: "{{ metron_mpack_path }}"
+    dest: /tmp
+
+- name: Install Metron MPack on Ambari Host
+  shell: ambari-server install-mpack --mpack=/tmp/metron_mpack-{{ ambari_mpack_version }}.tar.gz
+  args:
+    creates: /var/lib/ambari-server/resources/mpacks/metron-ambari.mpack-{{ ambari_mpack_version }}/addon-services

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/ansible/roles/ambari_master/tasks/mpack.yml
----------------------------------------------------------------------
diff --git a/metron-deployment/ansible/roles/ambari_master/tasks/mpack.yml b/metron-deployment/ansible/roles/ambari_master/tasks/mpack.yml
deleted file mode 100644
index 16289bb..0000000
--- a/metron-deployment/ansible/roles/ambari_master/tasks/mpack.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-#  Licensed to the Apache Software Foundation (ASF) under one or more
-#  contributor license agreements.  See the NOTICE file distributed with
-#  this work for additional information regarding copyright ownership.
-#  The ASF licenses this file to You under the Apache License, Version 2.0
-#  (the "License"); you may not use this file except in compliance with
-#  the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#
----
-- name: Copy MPack to Ambari Host
-  copy:
-    src: "{{ metron_mpack_path }}"
-    dest: /tmp
-
-- name: Install MPack on Ambari Host
-  shell: ambari-server install-mpack --mpack=/tmp/metron_mpack-{{ ambari_mpack_version }}.tar.gz
-  args:
-    creates: /var/lib/ambari-server/resources/mpacks/metron-ambari.mpack-{{ ambari_mpack_version }}/addon-services

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/ansible/roles/load_web_templates/tasks/main.yml
----------------------------------------------------------------------
diff --git a/metron-deployment/ansible/roles/load_web_templates/tasks/main.yml b/metron-deployment/ansible/roles/load_web_templates/tasks/main.yml
index 3a91960..55927c3 100644
--- a/metron-deployment/ansible/roles/load_web_templates/tasks/main.yml
+++ b/metron-deployment/ansible/roles/load_web_templates/tasks/main.yml
@@ -17,7 +17,7 @@
 ---
 - name: Load Kibana Dashboard
   command: >
-    curl -s -w "%{http_code}" -u admin:admin -H "X-Requested-By: ambari" -X POST -d '{ "RequestInfo": { "context": "Install Kibana Dashboard from REST", "command": "LOAD_TEMPLATE"},"Requests/resource_filters": [{"service_name": "KIBANA","component_name": "KIBANA_MASTER","hosts" : "{{ kibana_hosts[0] }}"}]}' http://{{ groups.ambari_master[0] }}:{{ ambari_port }}/api/v1/clusters/{{ cluster_name }}/requests
+    curl -s -w "%{http_code}" -u admin:admin -H "X-Requested-By: ambari" -X POST -d '{ "RequestInfo": { "context": "Install Kibana Dashboard from REST", "command": "KIBANA_DASHBOARD_INSTALL"},"Requests/resource_filters": [{"service_name": "METRON","component_name": "METRON_INDEXING","hosts" : "{{ metron_hosts[0] }}"}]}' http://{{ groups.ambari_master[0] }}:{{ ambari_port }}/api/v1/clusters/{{ cluster_name }}/requests
   args:
     warn: off
   register: result

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/other-examples/manual-install/Manual_Install_CentOS6.md
----------------------------------------------------------------------
diff --git a/metron-deployment/other-examples/manual-install/Manual_Install_CentOS6.md b/metron-deployment/other-examples/manual-install/Manual_Install_CentOS6.md
index e1305b6..1631fac 100644
--- a/metron-deployment/other-examples/manual-install/Manual_Install_CentOS6.md
+++ b/metron-deployment/other-examples/manual-install/Manual_Install_CentOS6.md
@@ -878,11 +878,11 @@ curl -s -w "%{http_code}" -u admin:admin -H "X-Requested-By: ambari" -X POST -d
 
 - Load Kibana Dashboard with:
 ```
-curl -s -w "%{http_code}" -u <USERNAME>:<PASSWORD> -H "X-Requested-By: ambari" -X POST -d '{ "RequestInfo": { "context": "Install Kibana Dashboard from REST", "command": "LOAD_TEMPLATE"},"Requests/resource_filters": [{"service_name": "KIBANA","component_name": "KIBANA_MASTER","hosts" : "<HOSTNAME>"}]}' http://<AMBARI HOST>:8080/api/v1/clusters/<CLUSTERNAME>/requests
+curl -s -w "%{http_code}" -u <USERNAME>:<PASSWORD> -H "X-Requested-By: ambari" -X POST -d '{ "RequestInfo": { "context": "Install Kibana Dashboard from REST", "command": "KIBANA_DASHBOARD_INSTALL"},"Requests/resource_filters": [{"service_name": "METRON","component_name": "METRON_INDEXING","hosts" : "<HOSTNAME>"}]}' http://<AMBARI HOST>:8080/api/v1/clusters/<CLUSTERNAME>/requests
 ```
 For example:
 ```
-curl -s -w "%{http_code}" -u admin:admin -H "X-Requested-By: ambari" -X POST -d '{ "RequestInfo": { "context": "Install Kibana Dashboard from REST", "command": "LOAD_TEMPLATE"},"Requests/resource_filters": [{"service_name": "KIBANA","component_name": "KIBANA_MASTER","hosts" : "metron"}]}' http://192.168.10.10:8080/api/v1/clusters/metron/requests
+curl -s -w "%{http_code}" -u admin:admin -H "X-Requested-By: ambari" -X POST -d '{ "RequestInfo": { "context": "Install Kibana Dashboard from REST", "command": "KIBANA_DASHBOARD_INSTALL"},"Requests/resource_filters": [{"service_name": "METRON","component_name": "METRON_INDEXING","hosts" : "metron"}]}' http://192.168.10.10:8080/api/v1/clusters/metron/requests
 ```
 
 - If you installed Metron on a single node, you might have to increase the number of Storm supervisor slots from the default 2 to 5 or more. This can be done by editing the "supervisor.slots.ports" under Storm in the Ambari UI.

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/README.md
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/README.md b/metron-deployment/packaging/ambari/README.md
index 82e1537..52c7570 100644
--- a/metron-deployment/packaging/ambari/README.md
+++ b/metron-deployment/packaging/ambari/README.md
@@ -30,13 +30,19 @@ limitations under the License.
 * [Upgrading MPack Services](#upgrading-mpack-services)
 
 ## Overview
+
 Typically, Ambari Management Pack development will be done in the Vagrant environments. These instructions are specific to Vagrant, but can be adapted for other environments (e.g. make sure to be on the correct nodes for server vs agent files).
 
-There is an `mpack.json` file which describes what services the mpack will contains, versions, etc.
+There are two MPacks:
+
+* Metron - contains artifacts for deploying the Metron service
+* Elasticsearch - contains artifacts for installing Elasticsearch and Kibana services
 
-Alongside this are two directories, `addon-services` and `common-services`.
+There is an `mpack.json` file for each which describes what services the mpack will contain, versions, etc.
 
-The layout of `/common-services/METRON.CURRENT` is
+Alongside this are two directories, `addon-services` and `common-services`. The Metron MPack is described below, but the same structure applies to the Elasticsearch MPack.
+
+The layout of `/common-services/METRON/CURRENT` is
 * `/configuration`
  * This contains a set of `*-env.xml` files, relevant to particular components or the service as a whole. These are where properties are defined.
 * `/package`
@@ -59,11 +65,11 @@ The layout of `/common-services/METRON.CURRENT` is
 * `service_advisor.py`
  * Handles component layout and validation, along with some configuration for other services, or configuration that needs values from other services.
 
-The layout of `/addon-services/METRON.CURRENT` is
+The layout of `/addon-services/METRON/CURRENT` is
 * `/repos`
   * Contains `repoinfo.xml` that defines repositories to install packages from
 * `metainfo.xml`
-  * Limited info version of `/common-services/METRON.CURRENT/metainfo.xml`
+  * Limited info version of `/common-services/METRON/CURRENT/metainfo.xml`
 * `role_command_order.json`
   * Defines the order of service startup and other actions relative to each other.
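For illustration, an entry in `role_command_order.json` maps a component action to the actions that must complete before it (these component names are illustrative, not necessarily the actual entries):
```
{
  "general_deps": {
    "KIBANA_MASTER-START": ["ES_MASTER-START"]
  }
}
```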
 
@@ -361,6 +367,7 @@ Ambari stores the Python files from the service in a couple places. We'll want t
 Specifically, the server files live in
 ```
 /var/lib/ambari-server/resources/mpacks/metron-ambari.mpack-0.4.0.0/common-services
+/var/lib/ambari-server/resources/mpacks/elasticsearch-ambari.mpack-0.4.0.0/common-services
 /var/lib/ambari-agent/cache/common-services
 ```
 
@@ -409,6 +416,8 @@ After we've modified files in Ambari and the mpack is working, it is a good idea
   ```
 1. Install the mpack through Ambari as you normally would
 
+1. The same steps can be followed for Elasticsearch and Kibana by similarly deploying the Elasticsearch MPack located in `elasticsearch-mpack/target`.
+
 ## Configuration involving dependency services
 Metron can define expectations on other services, e.g. Storm's `topology.classpath` should be `/etc/hbase/conf:/etc/hadoop/conf`.
 This happens in `METRON.CURRENT/service_advisor.py`.
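As a rough sketch (illustrative names, not Metron's actual advisor code), a service advisor can push such an expectation onto another service's configuration via the standard Ambari advisor hooks:
```
def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
    # obtain a setter bound to Storm's storm-site configuration type
    putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
    # Metron expects HBase and Hadoop client configs on Storm's worker classpath
    putStormSiteProperty("topology.classpath", "/etc/hbase/conf:/etc/hadoop/conf")
```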
@@ -468,10 +477,12 @@ This is checked in the indexing master
 
 ## Upgrading MPack Services
 
-Apache Metron currently provides three services as part of its MPack
+Apache Metron currently provides one service as part of its Metron MPack
+* Metron
+
+Apache Metron currently provides two services as part of its Elasticsearch MPack
 * Elasticsearch
 * Kibana
-* Metron
 
 There is currently no mechanism provided for multi-version or backwards compatibility. If you upgrade a service, e.g. Elasticsearch 2.x to 5.x, that is the only version that will be
 supported by Ambari via MPack.

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/README.md
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/README.md b/metron-deployment/packaging/ambari/elasticsearch-mpack/README.md
new file mode 100644
index 0000000..e9a20cc
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/README.md
@@ -0,0 +1,62 @@
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+This provides a Management Pack (MPack) extension for [Apache Ambari](https://ambari.apache.org/) that simplifies the provisioning, management and monitoring of Elasticsearch and Kibana on clusters of any size.
+
+This allows you to easily install Elasticsearch and Kibana using a simple, guided process, and to monitor cluster health.
+
+### Prerequisites
+
+* Ambari 2.4.2+
+
+### Quick Start
+
+1. Build the Elasticsearch MPack. Execute the following command from the project's root directory.
+    ```
+    mvn clean package -Pmpack -DskipTests
+    ```
+
+1. This results in the MPack being produced at the following location.
+    ```
+    metron-deployment/packaging/ambari/elasticsearch-mpack/target/elasticsearch_mpack-x.y.z.0.tar.gz
+    ```
+
+1. Copy the tarball to the host where Ambari Server is installed.
+
+1. Ensure that Ambari Server is stopped.
+
+1. Install the MPack.
+    ```
+    ambari-server install-mpack --mpack=elasticsearch_mpack-x.y.z.0.tar.gz --verbose
+    ```
+
+1. Elasticsearch and Kibana will now be available as installable services within Ambari.
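+
+1. Optionally, confirm that the service definitions registered with Ambari by querying the stacks API (a sketch; substitute your credentials, host, and stack name/version):
+    ```
+    curl -u admin:admin -H "X-Requested-By: ambari" http://<AMBARI_HOST>:8080/api/v1/stacks/HDP/versions/2.5/services/ELASTICSEARCH
+    ```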
+
+### Installation Notes
+
+The MPack will make all Elasticsearch services available in Ambari in the same manner as any other service in a stack.  These can be installed via Ambari's "Add Services" user interface or during an initial cluster install.
+
+#### Kerberization
+
+Elasticsearch does not provide native Kerberos support in its free distribution.
+
+#### Limitations
+
+There are a few limitations that should be addressed to improve the Elasticsearch MPack installation.
+
+* The MPack does not support upgrades.

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/pom.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/pom.xml b/metron-deployment/packaging/ambari/elasticsearch-mpack/pom.xml
new file mode 100644
index 0000000..931ce63
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/pom.xml
@@ -0,0 +1,95 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xmlns="http://maven.apache.org/POM/4.0.0"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>org.apache.metron.packaging.mpacks</groupId>
+    <artifactId>elasticsearch_mpack</artifactId>
+    <version>0.4.3.0</version>
+    <name>Elasticsearch Ambari Management Pack</name>
+
+    <parent>
+        <groupId>org.apache.metron</groupId>
+        <artifactId>metron-deployment</artifactId>
+        <version>0.4.3</version>
+        <relativePath>../../..</relativePath>
+    </parent>
+
+    <dependencies>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <artifactId>maven-resources-plugin</artifactId>
+                <version>2.6</version>
+                <executions>
+                    <execution>
+                        <id>copy-resources</id>
+                        <phase>compile</phase>
+                        <goals>
+                            <goal>copy-resources</goal>
+                        </goals>
+                        <configuration>
+                            <outputDirectory>${basedir}/target/</outputDirectory>
+                            <resources>
+                                <resource>
+                                    <directory>${basedir}/src/main/resources</directory>
+                                    <includes>
+                                        <include>mpack.json</include>
+                                    </includes>
+                                    <filtering>true</filtering>
+                                </resource>
+                            </resources>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <artifactId>maven-jar-plugin</artifactId>
+                <version>${global_jar_version}</version>
+                <executions>
+                    <execution>
+                        <id>default-jar</id>
+                        <!-- put the default-jar in the none phase to skip it from being created -->
+                        <phase>none</phase>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <artifactId>maven-assembly-plugin</artifactId>
+                <configuration>
+                    <appendAssemblyId>false</appendAssemblyId>
+                    <descriptors>
+                        <descriptor>src/main/assemblies/elasticsearch-mpack.xml</descriptor>
+                    </descriptors>
+                </configuration>
+                <executions>
+                    <execution>
+                        <id>build-tarball</id>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>single</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+
+</project>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/assemblies/elasticsearch-mpack.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/assemblies/elasticsearch-mpack.xml b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/assemblies/elasticsearch-mpack.xml
new file mode 100644
index 0000000..238b028
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/assemblies/elasticsearch-mpack.xml
@@ -0,0 +1,43 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<assembly
+        xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+        xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
+    <id>archive</id>
+    <formats>
+        <format>tar.gz</format>
+    </formats>
+    <includeBaseDirectory>true</includeBaseDirectory>
+    <fileSets>
+        <fileSet>
+            <directory>src/main/resources/common-services</directory>
+            <outputDirectory>common-services</outputDirectory>
+        </fileSet>
+        <fileSet>
+            <directory>src/main/resources/addon-services</directory>
+            <outputDirectory>addon-services</outputDirectory>
+        </fileSet>
+    </fileSets>
+    <files>
+        <file>
+            <source>target/mpack.json</source>
+        </file>
+    </files>
+    <dependencySets>
+    </dependencySets>
+</assembly>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/addon-services/ELASTICSEARCH/5.6.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/addon-services/ELASTICSEARCH/5.6.2/metainfo.xml b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/addon-services/ELASTICSEARCH/5.6.2/metainfo.xml
new file mode 100755
index 0000000..accf7da
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/addon-services/ELASTICSEARCH/5.6.2/metainfo.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>ELASTICSEARCH</name>
+            <version>5.6.2</version>
+            <extends>common-services/ELASTICSEARCH/5.6.2</extends>
+        </service>
+    </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/addon-services/ELASTICSEARCH/5.6.2/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/addon-services/ELASTICSEARCH/5.6.2/repos/repoinfo.xml b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/addon-services/ELASTICSEARCH/5.6.2/repos/repoinfo.xml
new file mode 100644
index 0000000..ba21fb1
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/addon-services/ELASTICSEARCH/5.6.2/repos/repoinfo.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+       http://www.apache.org/licenses/LICENSE-2.0
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+    <os family="redhat6">
+        <repo>
+            <baseurl>https://artifacts.elastic.co/packages/5.x/yum</baseurl>
+            <repoid>elasticsearch-5.x</repoid>
+            <reponame>ELASTICSEARCH</reponame>
+        </repo>
+    </os>
+    <os family="redhat7">
+        <repo>
+            <baseurl>https://artifacts.elastic.co/packages/5.x/yum</baseurl>
+            <repoid>elasticsearch-5.x</repoid>
+            <reponame>ELASTICSEARCH</reponame>
+        </repo>
+    </os>
+    <os family="ubuntu14">
+        <!--
+            see details about Ambari fixes for Ubuntu introduced in Ambari 2.6+
+                https://github.com/apache/ambari/commit/f8b29df9685b443d4a5c06c6e1725e4428c95b49#diff-6f26c26ed59462200d018c5e1e71e773
+                https://issues.apache.org/jira/browse/AMBARI-21856
+        -->
+        <repo>
+            <baseurl>https://artifacts.elastic.co/packages/5.x/apt</baseurl>
+            <repoid>elasticsearch-5.x</repoid>
+            <reponame>ELASTICSEARCH</reponame>
+            <distribution>stable</distribution>
+        </repo>
+    </os>
+</reposinfo>
+

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/addon-services/KIBANA/5.6.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/addon-services/KIBANA/5.6.2/metainfo.xml b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/addon-services/KIBANA/5.6.2/metainfo.xml
new file mode 100755
index 0000000..8a4fba2
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/addon-services/KIBANA/5.6.2/metainfo.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0"?>
+<!--
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>KIBANA</name>
+            <version>5.6.2</version>
+            <extends>common-services/KIBANA/5.6.2</extends>
+        </service>
+    </services>
+</metainfo>
+

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/addon-services/KIBANA/5.6.2/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/addon-services/KIBANA/5.6.2/quicklinks/quicklinks.json b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/addon-services/KIBANA/5.6.2/quicklinks/quicklinks.json
new file mode 100755
index 0000000..622a512
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/addon-services/KIBANA/5.6.2/quicklinks/quicklinks.json
@@ -0,0 +1,27 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"HTTP_ONLY"
+    },
+
+    "links": [
+      {
+        "name": "kibana_master_ui",
+        "label": "Metron Dashboard",
+        "requires_user_name": "false",
+        "url":"%@://%@:%@/",
+        "port":{
+          "http_property": "kibana_server_port",
+          "http_default_port": "5601",
+          "https_property": "kibana_server_port",
+          "https_default_port": "5601",
+          "regex": "^(\\d+)$",
+          "site": "kibana-env"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/addon-services/KIBANA/5.6.2/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/addon-services/KIBANA/5.6.2/repos/repoinfo.xml b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/addon-services/KIBANA/5.6.2/repos/repoinfo.xml
new file mode 100644
index 0000000..2755818
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/addon-services/KIBANA/5.6.2/repos/repoinfo.xml
@@ -0,0 +1,60 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+       http://www.apache.org/licenses/LICENSE-2.0
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+    <os family="redhat6">
+        <repo>
+            <baseurl>https://artifacts.elastic.co/packages/5.x/yum</baseurl>
+            <repoid>kibana-5.x</repoid>
+            <reponame>KIBANA</reponame>
+        </repo>
+        <repo>
+            <baseurl>http://packages.elastic.co/curator/5/centos/6</baseurl>
+            <repoid>ES-Curator-5.x</repoid>
+            <reponame>CURATOR</reponame>
+        </repo>
+    </os>
+    <os family="redhat7">
+        <repo>
+            <baseurl>https://artifacts.elastic.co/packages/5.x/yum</baseurl>
+            <repoid>kibana-5.x</repoid>
+            <reponame>KIBANA</reponame>
+        </repo>
+        <repo>
+            <baseurl>http://packages.elastic.co/curator/5/centos/7</baseurl>
+            <repoid>ES-Curator-5.x</repoid>
+            <reponame>CURATOR</reponame>
+        </repo>
+    </os>
+    <os family="ubuntu14">
+        <!--
+            see details about Ambari fixes for Ubuntu introduced in Ambari 2.6+
+                https://github.com/apache/ambari/commit/f8b29df9685b443d4a5c06c6e1725e4428c95b49#diff-6f26c26ed59462200d018c5e1e71e773
+                https://issues.apache.org/jira/browse/AMBARI-21856
+        -->
+        <repo>
+            <baseurl>https://artifacts.elastic.co/packages/5.x/apt</baseurl>
+            <repoid>kibana-5.x</repoid>
+            <reponame>KIBANA</reponame>
+            <distribution>stable</distribution>
+        </repo>
+        <repo>
+            <baseurl>https://packages.elastic.co/curator/5/debian</baseurl>
+            <repoid>ES-Curator-5.x</repoid>
+            <reponame>CURATOR</reponame>
+            <distribution>stable</distribution>
+        </repo>
+    </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-env.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-env.xml b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-env.xml
new file mode 100755
index 0000000..9e4f8ad
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-env.xml
@@ -0,0 +1,86 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<configuration>
+  <property>
+    <name>elastic_user</name>
+    <value>elasticsearch</value>
+    <property-type>USER</property-type>
+    <description>Service user for Elasticsearch</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>elastic_group</name>
+    <value>elasticsearch</value>
+    <property-type>GROUP</property-type>
+    <description>Service group for Elasticsearch</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>elastic_log_dir</name>
+    <value>/var/log/elasticsearch</value>
+    <description>Log directory for elastic</description>
+  </property>
+  <property>
+    <name>elastic_pid_dir</name>
+    <value>/var/run/elasticsearch</value>
+    <description>The directory for pid files</description>
+  </property>
+  <!-- elasticsearch-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for elastic-env.sh file</description>
+    <value>
+#!/bin/bash
+
+# Set ELASTICSEARCH specific environment variables here.
+
+# The java implementation to use.
+export JAVA_HOME={{java64_home}}
+export PATH=$PATH:$JAVA_HOME/bin
+    </value>
+  </property>
+  <property>
+    <name>elastic_user_nofile_limit</name>
+    <value>65536</value>
+    <description>Max open file limit for Elasticsearch user.</description>
+  </property>
+  <property>
+    <name>elastic_user_nproc_limit</name>
+    <value>2048</value>
+    <description>Max number of processes for Elasticsearch user.</description>
+  </property>
+  <property>
+    <name>elastic_user_memlock_soft_limit</name>
+    <value>unlimited</value>
+    <description>Max locked-in memory address space (soft memlock limit).</description>
+  </property>
+  <property>
+    <name>elastic_user_memlock_hard_limit</name>
+    <value>unlimited</value>
+    <description>Max locked-in memory address space (hard memlock limit).</description>
+  </property>
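+  <!--
+    Illustrative note: these limit values are typically rendered into an
+    /etc/security/limits.d file using the standard "domain type item value"
+    syntax, e.g.
+      elasticsearch - nofile  65536
+      elasticsearch - nproc   2048
+      elasticsearch soft memlock unlimited
+      elasticsearch hard memlock unlimited
+  -->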
+</configuration>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-jvm-options.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-jvm-options.xml b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-jvm-options.xml
new file mode 100644
index 0000000..5c6aaca
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-jvm-options.xml
@@ -0,0 +1,144 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<configuration>
+    <property>
+        <name>heap_size</name>
+        <value>512m</value>
+        <description>JVM heap size</description>
+    </property>
+    <property>
+        <name>content</name>
+        <description>The jinja template for the Elasticsearch JVM options file.</description>
+        <value>
+## JVM configuration
+
+################################################################
+## IMPORTANT: JVM heap size
+################################################################
+##
+## You should always set the min and max JVM heap
+## size to the same value. For example, to set
+## the heap to 4 GB, set:
+##
+## -Xms4g
+## -Xmx4g
+##
+## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html
+## for more information
+##
+################################################################
+
+# Xms represents the initial size of total heap space
+# Xmx represents the maximum size of total heap space
+
+-Xms{{heap_size}}
+-Xmx{{heap_size}}
+
+################################################################
+## Expert settings
+################################################################
+##
+## All settings below this section are considered
+## expert settings. Don't tamper with them unless
+## you understand what you are doing
+##
+################################################################
+
+## GC configuration
+-XX:+UseConcMarkSweepGC
+-XX:CMSInitiatingOccupancyFraction=75
+-XX:+UseCMSInitiatingOccupancyOnly
+
+## optimizations
+
+# pre-touch memory pages used by the JVM during initialization
+-XX:+AlwaysPreTouch
+
+## basic
+
+# force the server VM (remove on 32-bit client JVMs)
+-server
+
+# explicitly set the stack size (reduce to 320k on 32-bit client JVMs)
+-Xss1m
+
+# set to headless, just in case
+-Djava.awt.headless=true
+
+# ensure UTF-8 encoding by default (e.g. filenames)
+-Dfile.encoding=UTF-8
+
+# use our provided JNA always versus the system one
+-Djna.nosys=true
+
+# use old-style file permissions on JDK9
+-Djdk.io.permissionsUseCanonicalPath=true
+
+# flags to configure Netty
+-Dio.netty.noUnsafe=true
+-Dio.netty.noKeySetOptimization=true
+-Dio.netty.recycler.maxCapacityPerThread=0
+
+# log4j 2
+-Dlog4j.shutdownHookEnabled=false
+-Dlog4j2.disable.jmx=true
+-Dlog4j.skipJansi=true
+
+## heap dumps
+
+# generate a heap dump when an allocation from the Java heap fails
+# heap dumps are created in the working directory of the JVM
+-XX:+HeapDumpOnOutOfMemoryError
+
+# specify an alternative path for heap dumps
+# ensure the directory exists and has sufficient space
+#-XX:HeapDumpPath=${heap.dump.path}
+
+## GC logging
+
+#-XX:+PrintGCDetails
+#-XX:+PrintGCTimeStamps
+#-XX:+PrintGCDateStamps
+#-XX:+PrintClassHistogram
+#-XX:+PrintTenuringDistribution
+#-XX:+PrintGCApplicationStoppedTime
+
+# log GC status to a file with time stamps
+# ensure the directory exists
+#-Xloggc:${loggc}
+
+# By default, the GC log file will not rotate.
+# By uncommenting the lines below, the GC log file
+# will be rotated every 128MB at most 32 times.
+#-XX:+UseGCLogFileRotation
+#-XX:NumberOfGCLogFiles=32
+#-XX:GCLogFileSize=128M
+
+# Elasticsearch 5.0.0 will throw an exception on unquoted field names in JSON.
+# If documents were already indexed with unquoted fields in a previous version
+# of Elasticsearch, some operations may throw errors.
+#
+# WARNING: This option will be removed in Elasticsearch 6.0.0 and is provided
+# only for migration purposes.
+#-Delasticsearch.json.allow_unquoted_field_names=true
+        </value>
+    </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-site.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-site.xml b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-site.xml
new file mode 100755
index 0000000..34df1e4
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-site.xml
@@ -0,0 +1,198 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<!-- Elastic search  Configurations -->
+
+<configuration supports_final="true">
+    <!-- Configurations -->
+    <property>
+        <name>cluster_name</name>
+        <value>metron</value>
+        <description>Elasticsearch Cluster Name identifies your Elasticsearch subsystem</description>
+    </property>
+    <property>
+        <name>masters_also_are_datanodes</name>
+        <value>"false"</value>
+        <description>ES Masters and Slaves cannot be installed on the same nodes.  Set this to "true" if you want the ES master nodes to serve as combined master/datanodes. Note: surround value in quotes.</description>
+        <value-attributes>
+            <type>string</type>
+        </value-attributes>
+    </property>
+    <property>
+        <name>zen_discovery_ping_unicast_hosts</name>
+        <!--Ideally this gets populated by the list of master eligible nodes (as an acceptable default).  Unsure how to do this.-->
+        <!--Also need to document whether should list masters only, or all ES nodes. I think this one is all nodes, but previous inline comment said Masters.-->
+        <value></value>
+        <description>Unicast discovery list of hosts to act as gossip routers, comma-separated list with square brackets: [ eshost1, eshost2 ]</description>
+    </property>
+    <property>
+        <name>index_number_of_shards</name>
+        <value>4</value>
+        <description>Set the number of shards (splits) of an index.  Changes are not effective after index creation. Usually set to 1 for single-node install.</description>
+    </property>
+    <property>
+        <name>index_number_of_replicas</name>
+        <value>2</value>
+        <description>Set the number of replicas (copies in addition to the first) of an index. Usually set to 0 for single-node install.</description>
+    </property>
+    <property>
+        <name>path_data</name>
+        <value>"/opt/lmm/es_data"</value>
+        <description>Comma-separated list of directories where to store index data allocated for each node: "/mnt/first","/mnt/second".  Number of paths should relate to number of shards, and preferably should be on separate physical volumes.</description>
+    </property>
+    <property>
+        <name>http_cors_enabled</name>
+        <value>"false"</value>
+        <description>Enable or disable cross-origin resource sharing, i.e. whether a browser on another origin can do requests to Elasticsearch. Defaults to false.</description>
+        <value-attributes>
+            <type>string</type>
+        </value-attributes>
+    </property>
+    <property>
+        <name>http_port</name>
+        <value>9200-9300</value>
+        <description>Set a custom port to listen for HTTP traffic</description>
+    </property>
+    <property>
+        <name>transport_tcp_port</name>
+        <value>9300-9400</value>
+        <description>Set a custom port for the node to node communication</description>
+    </property>
+    <!--  Multi-node Discovery -->
+    <property>
+        <name>discovery_zen_ping_timeout</name>
+        <value>3s</value>
+        <description>Wait for ping responses for master discovery</description>
+    </property>
+    <property>
+        <name>discovery_zen_fd_ping_interval</name>
+        <value>15s</value>
+        <description>Wait for ping for cluster discovery</description>
+    </property>
+    <property>
+        <name>discovery_zen_fd_ping_timeout</name>
+        <value>60s</value>
+        <description>Wait for ping for cluster discovery</description>
+    </property>
+    <property>
+        <name>discovery_zen_fd_ping_retries</name>
+        <value>5</value>
+        <description>Number of ping retries before blacklisting</description>
+    </property>
+    <!--  Gateway -->
+    <property>
+        <name>gateway_recover_after_data_nodes</name>
+        <value>3</value>
+        <description>Recover as long as this many data or master nodes have joined the cluster.</description>
+    </property>
+    <property>
+        <name>recover_after_time</name>
+        <value>15m</value>
+        <description>recover_after_time</description>
+    </property>
+    <property>
+        <name>expected_data_nodes</name>
+        <value>0</value>
+        <description>expected_data_nodes</description>
+    </property>
+    <!--  Index -->  
+    <property>
+        <name>index_merge_scheduler_max_thread_count</name>
+        <value>5</value>
+        <description>index.merge.scheduler.max_thread_count</description>
+    </property>
+    <property>
+        <name>indices_memory_index_store_throttle_type</name>
+        <value>none</value>
+        <description>index_store_throttle_type</description>
+    </property>
+    <property>
+        <name>index_refresh_interval</name>
+        <value>1s</value>
+        <description>index refresh interval</description>
+    </property>
+    <property>
+        <name>index_translog_flush_threshold_size</name>
+        <value>5g</value>
+        <description>index_translog_flush_threshold_size</description>
+    </property>
+    <property>
+        <name>indices_memory_index_buffer_size</name>
+        <value>10%</value>
+        <description>Percentage of heap used for write buffers</description>
+    </property>
+    <property>
+        <name>bootstrap_memory_lock</name>
+        <value>true</value>
+        <description>On Linux/Unix systems only, use mlockall to try to lock the process address space into RAM, preventing any Elasticsearch memory from being swapped out</description>
+    </property>
+    <property>
+        <name>threadpool_bulk_queue_size</name>
+        <value>3000</value>
+        <description>The number of bulk requests that can be queued for execution on a node when no thread is available to execute them</description>
+    </property>
+    <property>
+        <name>threadpool_index_queue_size</name>
+        <value>1000</value>
+        <description>The number of index requests that can be queued for execution on a node when no thread is available to execute them</description>
+    </property>
+    <property>
+        <name>indices_cluster_send_refresh_mapping</name>
+        <value>false</value>
+        <description>Set on the data nodes to make index requests more efficient</description>
+    </property>
+    <property>
+        <name>indices_fielddata_cache_size</name>
+        <value>25%</value>
+        <description>Keep in mind that setting this value improperly can cause facet searches and sorting to perform very poorly, and can cause the ES node to run out of memory if a facet query is run against a large index</description>
+    </property>
+    <property>
+        <name>cluster_routing_allocation_disk_watermark_high</name>
+        <value>0.99</value>
+        <description>Property used when multiple drives are used to understand max thresholds</description>
+    </property>
+    <property>
+        <name>cluster_routing_allocation_disk_threshold_enabled</name>
+        <value>true</value>
+        <description>Property used when multiple drives are used to understand if thresholding is active</description>
+    </property>
+    <property>
+        <name>cluster_routing_allocation_disk_watermark_low</name>
+        <value>0.97</value>
+        <description>Property used when multiple drives are used to understand min thresholds</description>
+    </property>
+    <property>
+        <name>cluster_routing_allocation_node_concurrent_recoveries</name>
+        <value>4</value>
+        <description>Max concurrent recoveries, useful for fast recovery of the cluster nodes on restart</description>
+    </property>
+    <property>
+        <name>network_host</name>
+        <value>[ _local_, _site_ ]</value>
+        <description>Network interface(s) ES will bind to within each node. "_site_" or a more specific external address is required for all multi-node clusters, and also recommended for single-node installs to allow access to ES reports from non-local hosts. Always include the square brackets. See https://www.elastic.co/guide/en/elasticsearch/reference/2.3/modules-network.html for ES documentation.</description>
+    </property>
+    <property>
+        <name>network_publish_host</name>
+        <value>[]</value>
+        <value-attributes>
+            <empty-value-valid>true</empty-value-valid>
+        </value-attributes>
+        <description>Network address ES will publish for client and peer use. Empty value causes it to pick from the values in network_host, which works in most simple environments. MUST set explicitly for MULTI-HOMED SYSTEMS. See https://www.elastic.co/guide/en/elasticsearch/reference/2.3/modules-network.html for ES documentation.</description>
+    </property>
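+    <!--
+      Illustrative example: on a multi-homed node you might publish a single
+      routable address, keeping the square brackets, e.g. [ 192.168.10.10 ]
+    -->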
+</configuration>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-sysconfig.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-sysconfig.xml b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-sysconfig.xml
new file mode 100755
index 0000000..ea6ca38
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-sysconfig.xml
@@ -0,0 +1,97 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<configuration>
+    <property>
+        <name>elastic_home</name>
+        <value>/usr/share/elasticsearch/</value>
+        <description>Elasticsearch Home Directory</description>
+    </property>
+    <property>
+        <name>data_dir</name>
+        <value>/var/lib/elasticsearch/</value>
+        <description>Elasticsearch Data Directory</description>
+    </property>
+    <property>
+        <name>work_dir</name>
+        <value>/tmp/elasticsearch/</value>
+        <description>Elasticsearch Work Directory</description>
+    </property>
+    <property>
+        <name>conf_dir</name>
+        <value>/etc/elasticsearch/</value>
+        <description>Elasticsearch Configuration Directory</description>
+    </property>
+    <property>
+        <name>max_open_files</name>
+        <value>65536</value>
+        <description>Maximum number of open files</description>
+    </property>
+    <property>
+        <name>max_map_count</name>
+        <value>262144</value>
+        <description>Maximum number of memory map areas for process</description>
+    </property>
+
+    <!-- Elasticsearch sysconfig -->
+    <property>
+        <name>content</name>
+        <description>This is the jinja template for elastic sysconfig file</description>
+        <value>
+# Directory where the Elasticsearch binary distribution resides
+ES_HOME={{elastic_home}}
+
+# Maximum number of open files
+MAX_OPEN_FILES={{max_open_files}}
+
+# Maximum number of VMA (Virtual Memory Areas) a process can own
+MAX_MAP_COUNT={{max_map_count}}
+
+# Elasticsearch log directory
+LOG_DIR={{log_dir}}
+
+# Elasticsearch data directory
+DATA_DIR={{data_dir}}
+
+# Elasticsearch work directory
+WORK_DIR={{work_dir}}
+
+# Elasticsearch conf directory
+CONF_DIR={{conf_dir}}
+
+# User to run as, change this to a specific elasticsearch user if possible
+# Also make sure, this user can write into the log directories in case you change them
+# This setting only works for the init script, but has to be configured separately for systemd startup
+ES_USER={{elastic_user}}
+
+# Elasticsearch pid directory
+PID_DIR={{pid_dir}}
+
+# JAVA_HOME must be provided here for OS that use systemd service launch
+JAVA_HOME={{java64_home}}
+
+# Additional Java options - now preferential to use 'jvm.options' file instead
+ES_JAVA_OPTS=""
+
+# https://www.elastic.co/guide/en/elasticsearch/reference/5.6/_memory_lock_check.html
+MAX_LOCKED_MEMORY=unlimited
+        </value>
+    </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-systemd.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-systemd.xml b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-systemd.xml
new file mode 100644
index 0000000..311e3c0
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-systemd.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<configuration>
+    <property>
+        <name>content</name>
+        <description>The jinja template for the Elasticsearch systemd override file.  Applies only to platforms that use systemd.</description>
+        <value>
+[Service]
+LimitMEMLOCK=infinity
+        </value>
+    </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/metainfo.xml b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/metainfo.xml
new file mode 100755
index 0000000..47abb45
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/metainfo.xml
@@ -0,0 +1,97 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>ELASTICSEARCH</name>
+            <displayName>Elasticsearch</displayName>
+            <comment>Indexing and Search</comment>
+            <version>5.6.2</version>
+            <components>
+                <component>
+                    <name>ES_MASTER</name>
+                    <displayName>Elasticsearch Master</displayName>
+                    <category>MASTER</category>
+                    <cardinality>1+</cardinality>
+                    <commandScript>
+                        <script>scripts/elastic_master.py</script>
+                        <scriptType>PYTHON</scriptType>
+                        <timeout>600</timeout>
+                    </commandScript>
+                </component>
+                <component>
+                    <name>ES_SLAVE</name>
+                    <displayName>Elasticsearch Data Node</displayName>
+                    <category>SLAVE</category>
+                    <cardinality>0+</cardinality>
+                    <commandScript>
+                        <script>scripts/elastic_slave.py</script>
+                        <scriptType>PYTHON</scriptType>
+                        <timeout>600</timeout>
+                    </commandScript>
+                </component>
+            </components>
+            <osSpecifics>
+                <osSpecific>
+                    <osFamily>redhat6</osFamily>
+                    <packages>
+                        <package>
+                            <name>elasticsearch-5.6.2</name>
+                        </package>
+                    </packages>
+                </osSpecific>
+                <osSpecific>
+                    <osFamily>redhat7</osFamily>
+                    <packages>
+                        <package>
+                            <name>elasticsearch-5.6.2</name>
+                        </package>
+                    </packages>
+                </osSpecific>
+                <osSpecific>
+                    <osFamily>ubuntu14</osFamily>
+                    <packages>
+                        <package>
+                            <name>elasticsearch=5.6.2</name>
+                        </package>
+                    </packages>
+                </osSpecific>
+            </osSpecifics>
+            <commandScript>
+                <script>scripts/service_check.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>300</timeout>
+            </commandScript>
+            <configuration-dependencies>
+                <config-type>elastic-env</config-type>
+                <config-type>elastic-site</config-type>
+                <config-type>elastic-sysconfig</config-type>
+                <config-type>elastic-systemd</config-type>
+                <config-type>elastic-jvm-options</config-type>
+            </configuration-dependencies>
+            <restartRequiredAfterChange>true</restartRequiredAfterChange>
+            <quickLinksConfigurations>
+                <quickLinksConfiguration>
+                    <fileName>quicklinks.json</fileName>
+                    <default>true</default>
+                </quickLinksConfiguration>
+            </quickLinksConfigurations>
+        </service>
+    </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_commands.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_commands.py b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_commands.py
new file mode 100644
index 0000000..618d10a
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_commands.py
@@ -0,0 +1,266 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from ambari_commons.os_check import OSCheck
+from resource_management.core.exceptions import ExecutionFailed
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources.system import Directory
+from resource_management.core.resources.system import File
+from resource_management.core.source import InlineTemplate
+from resource_management.core.source import Template
+from resource_management.core.resources import User
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions import format as ambari_format
+from resource_management.libraries.functions.get_user_call_output import get_user_call_output
+
+
+def service_check(cmd, user, label):
+    """
+    Executes a SysV service check command that adheres to LSB-compliant
+    return codes.  The return codes are interpreted as defined
+    by the LSB.
+
+    See http://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/iniscrptact.html
+    for more information.
+
+    :param cmd: The service check command to execute.
+    :param user: The user to run the service check command as.
+    :param label: The name of the service.
+    """
+    Logger.info("Performing service check; cmd={0}, user={1}, label={2}".format(cmd, user, label))
+    rc, out, err = get_user_call_output(cmd, user, is_checked_call=False)
+
+    if rc in [1, 2, 3]:
+      # if return code in [1, 2, 3], then 'program is not running' or 'program is dead'
+      Logger.info("{0} is not running".format(label))
+      raise ComponentIsNotRunning()
+
+    elif rc == 0:
+      # if return code = 0, then 'program is running or service is OK'
+      Logger.info("{0} is running".format(label))
+
+    else:
+      # else service state is unknown
+      err_msg = "{0} service check failed; cmd '{1}' returned {2}".format(label, cmd, rc)
+      Logger.error(err_msg)
+      raise ExecutionFailed(err_msg, rc, out, err)
+
+def is_systemd_running():
+    """
+    Determines if the platform is running Systemd.
+    :return: True if the platform is running Systemd; False otherwise.
+    """
+    Logger.info("Is the platform running Systemd?")
+    rc, out, err = get_user_call_output("pidof systemd", "root", is_checked_call=False)
+    if rc == 0:
+        Logger.info("Systemd was found")
+        return True
+    else:
+        Logger.info("Systemd was NOT found")
+        return False
+
+def configure_systemd(params):
+    """
+    Configure Systemd for Elasticsearch.
+    """
+    Logger.info("Configuring Systemd for Elasticsearch");
+
+    # ensure the systemd directory for elasticsearch overrides exists
+    Logger.info("Create Systemd directory for Elasticsearch: {0}".format(params.systemd_elasticsearch_dir))
+    Directory(params.systemd_elasticsearch_dir,
+              create_parents=True,
+              owner='root',
+              group='root')
+
+    # when using Elasticsearch packages on systems that use systemd, system
+    # limits must also be specified via systemd.
+    # see https://www.elastic.co/guide/en/elasticsearch/reference/5.6/setting-system-settings.html#systemd
+    Logger.info("Elasticsearch systemd limits: {0}".format(params.systemd_override_file))
+    File(params.systemd_override_file,
+         content=InlineTemplate(params.systemd_override_template),
+         owner="root",
+         group="root")
+
+    # reload the configuration
+    Execute("systemctl daemon-reload")
+
+def create_user(params):
+    """
+    Creates the user required for Elasticsearch.
+    """
+    Logger.info("Creating user={0} in group={1}".format(params.elastic_user, params.elastic_group))
+    User(params.elastic_user, action = "create", groups = params.elastic_group)
+
+def create_directories(params, directories):
+    """
+    Creates one or more directories.
+    """
+    Logger.info("Creating directories: {0}".format(directories))
+    Directory(directories,
+              create_parents=True,
+              mode=0755,
+              owner=params.elastic_user,
+              group=params.elastic_group
+              )
+
+def create_elastic_env(params):
+    """
+    Creates the Elasticsearch environment file.
+    """
+    Logger.info("Create Elasticsearch environment file.")
+    File("{0}/elastic-env.sh".format(params.conf_dir),
+         owner=params.elastic_user,
+         group=params.elastic_group,
+         content=InlineTemplate(params.elastic_env_sh_template))
+
+def create_elastic_site(params, template_name):
+    """
+    Creates the Elasticsearch site file.
+    """
+    Logger.info("Creating Elasticsearch site file; template={0}".format(template_name))
+
+    elastic_site = params.config['configurations']['elastic-site']
+    path = "{0}/elasticsearch.yml".format(params.conf_dir)
+    template = Template(template_name, configurations=elastic_site)
+    File(path,
+         content=template,
+         owner=params.elastic_user,
+         group=params.elastic_group)
+
+def get_elastic_config_path(default="/etc/default/elasticsearch"):
+    """
+    Returns the path to the Elasticsearch system config file.  This path
+    differs based on the OS family.
+    :param default: The path used if the OS family is not recognized.
+    """
+    path = default
+    if OSCheck.is_redhat_family():
+      path = "/etc/sysconfig/elasticsearch"
+    elif OSCheck.is_ubuntu_family():
+      path = "/etc/default/elasticsearch"
+    else:
+      Logger.error("Unexpected OS family; using default path={0}".format(path))
+
+    return path
+
+def create_elastic_config(params):
+    """
+    Creates the Elasticsearch system config file.  Usually lands at either
+    /etc/sysconfig/elasticsearch or /etc/default/elasticsearch.
+    """
+    path = get_elastic_config_path()
+    Logger.info("Creating the Elasticsearch system config; path={0}".format(path))
+    File(path, owner="root", group="root", content=InlineTemplate(params.sysconfig_template))
+
+def create_elastic_pam_limits(params):
+    """
+    Creates the PAM limits for Elasticsearch.
+    """
+    Logger.info("Creating Elasticsearch PAM limits.")
+
+    # on some OSes this directory may not exist, so create it
+    Logger.info("Ensure PAM limits directory exists: {0}".format(params.limits_conf_dir))
+    Directory(params.limits_conf_dir,
+              create_parents=True,
+              owner='root',
+              group='root')
+
+    Logger.info("Creating Elasticsearch PAM limits; file={0}".format(params.limits_conf_file))
+    File(params.limits_conf_file,
+         content=Template('elasticsearch_limits.conf.j2'),
+         owner="root",
+         group="root")
+
+def create_elastic_jvm_options(params):
+    """
+    Creates the jvm.options file that specifies the Elasticsearch JVM options.
+    """
+    path = "{0}/jvm.options".format(params.conf_dir)
+    Logger.info("Creating Elasticsearch JVM Options; file={0}".format(path))
+    File(path,
+         content=InlineTemplate(params.jvm_options_template),
+         owner=params.elastic_user,
+         group=params.elastic_group)
+
+def get_data_directories(params):
+    """
+    Returns the directories to use for storing Elasticsearch data.
+    """
+    # path_data is a comma-separated list that may include quotes and spaces;
+    # normalize it into a clean list of directory paths
+    dirs = params.path_data.replace('"', '').replace(' ', '').split(',')
+
+    Logger.info("Elasticsearch data directories: dirs={0}".format(dirs))
+    return dirs
+
+def configure_master():
+    """
+    Configures the Elasticsearch master node.
+    """
+    import params
+
+    # define the directories required
+    dirs = [
+      params.log_dir,
+      params.pid_dir,
+      params.conf_dir,
+      "{0}/scripts".format(params.conf_dir)
+    ]
+    dirs += get_data_directories(params)
+
+    # configure the elasticsearch master
+    create_user(params)
+    create_directories(params, dirs)
+    create_elastic_env(params)
+    create_elastic_site(params, "elasticsearch.master.yaml.j2")
+    create_elastic_config(params)
+    create_elastic_pam_limits(params)
+    create_elastic_jvm_options(params)
+    if is_systemd_running():
+        configure_systemd(params)
+
+def configure_slave():
+    """
+    Configures the Elasticsearch slave node.
+    """
+    import params
+
+    # define the directories required
+    dirs = [
+      params.log_dir,
+      params.pid_dir,
+      params.conf_dir,
+    ]
+    dirs += get_data_directories(params)
+
+    # configure the elasticsearch slave
+    create_user(params)
+    create_directories(params, dirs)
+    create_elastic_env(params)
+    create_elastic_site(params, "elasticsearch.slave.yaml.j2")
+    create_elastic_config(params)
+    create_elastic_pam_limits(params)
+    create_elastic_jvm_options(params)
+    if is_systemd_running():
+        configure_systemd(params)
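
A side note on the return-code handling in service_check() above: LSB-style
init scripts signal state purely through the exit code of their "status"
action. Below is a minimal standalone sketch of the same pattern, outside of
Ambari's resource_management framework; the "elasticsearch" service name is
only an illustration, and any LSB-compliant SysV script behaves the same way:

    #!/usr/bin/env python
    # Minimal sketch of an LSB-style status check (illustrative only).
    import subprocess

    def lsb_status(service):
        # Per the LSB spec: 0 = running, 1-3 = stopped or dead,
        # anything else = unknown or failed status check.
        rc = subprocess.call(["service", service, "status"])
        if rc == 0:
            return "running"
        elif rc in (1, 2, 3):
            return "not running"
        else:
            raise RuntimeError("status check failed; rc={0}".format(rc))

    print(lsb_status("elasticsearch"))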

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_master.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_master.py b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_master.py
new file mode 100755
index 0000000..142ce4e
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_master.py
@@ -0,0 +1,72 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core import shell
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.script import Script
+from resource_management.core.logger import Logger
+from elastic_commands import service_check
+from elastic_commands import configure_master
+
+class Elasticsearch(Script):
+
+    def install(self, env):
+        import params
+        env.set_params(params)
+        Logger.info('Install Elasticsearch master node')
+        self.install_packages(env)
+
+    def configure(self, env, upgrade_type=None, config_dir=None):
+        import params
+        env.set_params(params)
+        Logger.info('Configure Elasticsearch master node')
+        configure_master()
+
+    def stop(self, env, upgrade_type=None):
+        import params
+        env.set_params(params)
+        Logger.info('Stop Elasticsearch master node')
+        Execute("service elasticsearch stop")
+
+    def start(self, env, upgrade_type=None):
+        import params
+        env.set_params(params)
+        Logger.info('Start Elasticsearch master node')
+        self.configure(env)
+        Execute("service elasticsearch start")
+
+    def status(self, env):
+        import params
+        env.set_params(params)
+        Logger.info('Status check Elasticsearch master node')
+        service_check(
+          cmd="service elasticsearch status",
+          user=params.elastic_status_check_user,
+          label="Elasticsearch Master")
+
+    def restart(self, env):
+        import params
+        env.set_params(params)
+        Logger.info('Restart Elasticsearch master node')
+        self.configure(env)
+        Execute("service elasticsearch restart")
+
+
+if __name__ == "__main__":
+    Elasticsearch().execute()
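
The class above follows Ambari's Script lifecycle contract: Ambari invokes
the script with a command name, and Script.execute() dispatches to the method
of the same name (install, configure, start, stop, status, restart). Below is
a simplified, hypothetical illustration of that dispatch pattern; it is not
the real Ambari implementation, which also handles config parsing, logging,
and structured output:

    # Toy dispatch in the style of Ambari's Script.execute() (hypothetical).
    import sys

    class MiniScript(object):
        def execute(self):
            # the command name arrives as the first CLI argument
            command = sys.argv[1] if len(sys.argv) > 1 else "status"
            method = getattr(self, command, None)
            if method is None:
                raise ValueError("No such command: {0}".format(command))
            method(env=None)

    class Elasticsearch(MiniScript):
        def start(self, env):
            print("starting elasticsearch")
        def status(self, env):
            print("checking elasticsearch")

    if __name__ == "__main__":
        Elasticsearch().execute()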

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_slave.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_slave.py b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_slave.py
new file mode 100755
index 0000000..2d559ff
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_slave.py
@@ -0,0 +1,71 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.script import Script
+
+from elastic_commands import service_check
+from elastic_commands import configure_slave
+
+class Elasticsearch(Script):
+
+    def install(self, env):
+        import params
+        env.set_params(params)
+        Logger.info('Install Elasticsearch slave node')
+        self.install_packages(env)
+
+    def configure(self, env, upgrade_type=None, config_dir=None):
+        import params
+        env.set_params(params)
+        Logger.info('Configure Elasticsearch slave node')
+        configure_slave()
+
+    def stop(self, env, upgrade_type=None):
+        import params
+        env.set_params(params)
+        Logger.info('Stop Elasticsearch slave node')
+        Execute("service elasticsearch stop")
+
+    def start(self, env, upgrade_type=None):
+        import params
+        env.set_params(params)
+        Logger.info('Start Elasticsearch slave node')
+        self.configure(env)
+        Execute("service elasticsearch start")
+
+    def status(self, env):
+        import params
+        env.set_params(params)
+        Logger.info('Status check Elasticsearch slave node')
+        service_check(
+          cmd="service elasticsearch status",
+          user=params.elastic_status_check_user,
+          label="Elasticsearch Slave")
+
+    def restart(self, env):
+        import params
+        env.set_params(params)
+        Logger.info('Restart Elasticsearch slave node')
+        self.configure(env)
+        Execute("service elasticsearch restart")
+
+
+if __name__ == "__main__":
+    Elasticsearch().execute()
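
Both node types finish configuration by calling configure_systemd(), which
writes a systemd override file for the elasticsearch unit and reloads the
daemon. The actual override content comes from the elastic-systemd config
template, which is not shown in this hunk; the sketch below is a rough,
assumed illustration of what such an override typically contains for
Elasticsearch (see the Elastic docs linked in elastic_commands.py), not the
mpack's template:

    # Rough sketch of a systemd override for Elasticsearch; the limit
    # values are illustrative assumptions, not the mpack's template.
    import os

    override_dir = "/etc/systemd/system/elasticsearch.service.d"
    override = ("[Service]\n"
                "LimitMEMLOCK=infinity\n"
                "LimitNOFILE=65536\n"
                "LimitNPROC=4096\n")

    if not os.path.isdir(override_dir):
        os.makedirs(override_dir)
    with open(os.path.join(override_dir, "override.conf"), "w") as f:
        f.write(override)
    # equivalent of: Execute("systemctl daemon-reload")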


[50/50] [abbrv] metron git commit: Merge remote-tracking branch 'origin/master' into feature/METRON-1416-upgrade-solr

Posted by rm...@apache.org.
Merge remote-tracking branch 'origin/master' into feature/METRON-1416-upgrade-solr


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/d0a4e4c0
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/d0a4e4c0
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/d0a4e4c0

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: d0a4e4c0f15d6b05371e97be19177729b1b33243
Parents: f8d7843 2b4f0b8
Author: merrimanr <me...@gmail.com>
Authored: Thu Apr 26 14:44:44 2018 -0500
Committer: merrimanr <me...@gmail.com>
Committed: Fri Apr 27 14:26:43 2018 -0500

----------------------------------------------------------------------
 dependencies_with_url.csv                       |   7 +-
 dev-utilities/committer-utils/README.md         |  75 +--
 dev-utilities/committer-utils/prepare-commit    |  77 ++-
 .../client/stellar/ProfilerFunctions.java       |  14 +-
 .../profiler/DefaultMessageDistributor.java     | 235 +++++++-
 .../metron/profiler/DefaultProfileBuilder.java  | 115 ++--
 .../metron/profiler/MessageDistributor.java     |  48 +-
 .../apache/metron/profiler/MessageRoute.java    |  19 +-
 .../apache/metron/profiler/MessageRouter.java   |  11 +-
 .../apache/metron/profiler/ProfileBuilder.java  |  34 +-
 .../metron/profiler/ProfileMeasurement.java     |   6 +-
 .../metron/profiler/StandAloneProfiler.java     | 100 +++-
 .../org/apache/metron/profiler/clock/Clock.java |  18 +-
 .../metron/profiler/clock/ClockFactory.java     |  38 ++
 .../profiler/clock/DefaultClockFactory.java     |  57 ++
 .../metron/profiler/clock/EventTimeClock.java   |  72 +++
 .../metron/profiler/clock/FixedClock.java       |  39 +-
 .../profiler/clock/FixedClockFactory.java       |  44 ++
 .../apache/metron/profiler/clock/WallClock.java |  17 +-
 .../profiler/DefaultMessageDistributorTest.java | 171 +++++-
 .../profiler/DefaultProfileBuilderTest.java     | 119 ++--
 .../metron/profiler/ProfilePeriodTest.java      |   1 -
 .../metron/profiler/StandAloneProfilerTest.java | 255 ++++++++
 .../profiler/clock/DefaultClockFactoryTest.java |  75 +++
 .../profiler/clock/EventTimeClockTest.java      | 115 ++++
 .../metron/profiler/clock/WallClockTest.java    |  54 ++
 metron-analytics/metron-profiler/README.md      | 108 +++-
 .../src/main/config/profiler.properties         |  14 +-
 .../src/main/flux/profiler/remote.yaml          |  50 +-
 .../profiler/bolt/DestinationHandler.java       |  56 --
 .../bolt/FixedFrequencyFlushSignal.java         | 135 +++++
 .../metron/profiler/bolt/FlushSignal.java       |  51 ++
 .../profiler/bolt/HBaseDestinationHandler.java  |  58 --
 .../metron/profiler/bolt/HBaseEmitter.java      |  73 +++
 .../profiler/bolt/KafkaDestinationHandler.java  | 110 ----
 .../metron/profiler/bolt/KafkaEmitter.java      | 164 +++++
 .../metron/profiler/bolt/ManualFlushSignal.java |  54 ++
 .../profiler/bolt/ProfileBuilderBolt.java       | 404 ++++++++++---
 .../bolt/ProfileMeasurementEmitter.java         |  59 ++
 .../profiler/bolt/ProfileSplitterBolt.java      | 136 ++++-
 .../zookeeper/event-time-test/profiler.json     |  12 +
 .../config/zookeeper/percentiles/profiler.json  |  12 -
 .../processing-time-test/profiler.json          |  11 +
 .../zookeeper/readme-example-1/profiler.json    |  17 -
 .../zookeeper/readme-example-2/profiler.json    |  18 -
 .../zookeeper/readme-example-3/profiler.json    |  11 -
 .../zookeeper/readme-example-4/profiler.json    |  11 -
 .../bolt/FixedFrequencyFlushSignalTest.java     |  71 +++
 .../metron/profiler/bolt/HBaseEmitterTest.java  | 120 ++++
 .../bolt/KafkaDestinationHandlerTest.java       | 203 -------
 .../metron/profiler/bolt/KafkaEmitterTest.java  | 291 +++++++++
 .../profiler/bolt/ProfileBuilderBoltTest.java   | 468 ++++++++-------
 .../profiler/bolt/ProfileHBaseMapperTest.java   |   6 +-
 .../profiler/bolt/ProfileSplitterBoltTest.java  | 288 +++++++--
 .../profiler/integration/MessageBuilder.java    |  75 +++
 .../integration/ProfilerIntegrationTest.java    | 329 +++++-----
 metron-contrib/metron-performance/README.md     | 205 +++++++
 .../performance_measurement.png                 | Bin 0 -> 5790 bytes
 metron-contrib/metron-performance/pom.xml       | 134 +++++
 .../src/main/assembly/assembly.xml              |  42 ++
 .../metron/performance/load/LoadGenerator.java  | 175 ++++++
 .../metron/performance/load/LoadOptions.java    | 499 ++++++++++++++++
 .../performance/load/MessageGenerator.java      |  48 ++
 .../metron/performance/load/SendToKafka.java    | 107 ++++
 .../load/monitor/AbstractMonitor.java           |  49 ++
 .../load/monitor/EPSGeneratedMonitor.java       |  53 ++
 .../monitor/EPSThroughputWrittenMonitor.java    |  77 +++
 .../performance/load/monitor/MonitorNaming.java |  23 +
 .../performance/load/monitor/MonitorTask.java   |  44 ++
 .../performance/load/monitor/Results.java       |  51 ++
 .../load/monitor/writers/CSVWriter.java         |  67 +++
 .../load/monitor/writers/ConsoleWriter.java     |  65 ++
 .../load/monitor/writers/Writable.java          |  40 ++
 .../load/monitor/writers/Writer.java            |  86 +++
 .../performance/sampler/BiasedSampler.java      | 113 ++++
 .../metron/performance/sampler/Sampler.java     |  24 +
 .../performance/sampler/UnbiasedSampler.java    |  28 +
 .../metron/performance/util/KafkaUtil.java      |  56 ++
 .../src/main/scripts/load_tool.sh               |  36 ++
 .../performance/load/LoadOptionsTest.java       |  93 +++
 .../performance/load/SendToKafkaTest.java       |  49 ++
 .../metron/performance/sampler/SamplerTest.java | 145 +++++
 metron-contrib/pom.xml                          |  15 +
 metron-deployment/Kerberos-manual-setup.md      | 209 +++++++
 metron-deployment/amazon-ec2/README.md          |  90 +--
 metron-deployment/amazon-ec2/playbook.yml       |   4 +-
 .../ansible/playbooks/metron_full_install.yml   |   4 +-
 .../roles/ambari_master/defaults/main.yml       |   2 +
 .../ambari_master/tasks/elasticsearch_mpack.yml |  26 +
 .../ansible/roles/ambari_master/tasks/main.yml  |   3 +-
 .../roles/ambari_master/tasks/metron_mpack.yml  |  26 +
 .../ansible/roles/ambari_master/tasks/mpack.yml |  26 -
 .../roles/load_web_templates/tasks/main.yml     |   2 +-
 .../roles/metron-builder/tasks/build-debs.yml   |   2 +-
 .../roles/metron-builder/tasks/build-rpms.yml   |   2 +-
 metron-deployment/development/README.md         |   5 +
 metron-deployment/development/centos6/README.md |   4 +-
 .../development/centos6/Vagrantfile             |  22 +-
 .../development/centos6/ansible/playbook.yml    |  23 +
 .../development/ubuntu14/README.md              |   4 +-
 .../development/ubuntu14/Vagrantfile            |  16 +-
 .../manual-install/Manual_Install_CentOS6.md    |   4 +-
 metron-deployment/packaging/ambari/README.md    | 193 +++---
 .../ambari/elasticsearch-mpack/README.md        |  62 ++
 .../ambari/elasticsearch-mpack/pom.xml          |  95 +++
 .../src/main/assemblies/elasticsearch-mpack.xml |  43 ++
 .../ELASTICSEARCH/5.6.2/metainfo.xml            |  29 +
 .../ELASTICSEARCH/5.6.2/repos/repoinfo.xml      |  45 ++
 .../addon-services/KIBANA/5.6.2/metainfo.xml    |  30 +
 .../KIBANA/5.6.2/quicklinks/quicklinks.json     |  27 +
 .../KIBANA/5.6.2/repos/repoinfo.xml             |  60 ++
 .../5.6.2/configuration/elastic-env.xml         |  86 +++
 .../5.6.2/configuration/elastic-jvm-options.xml | 144 +++++
 .../5.6.2/configuration/elastic-site.xml        | 198 +++++++
 .../5.6.2/configuration/elastic-sysconfig.xml   |  97 +++
 .../5.6.2/configuration/elastic-systemd.xml     |  30 +
 .../ELASTICSEARCH/5.6.2/metainfo.xml            |  97 +++
 .../5.6.2/package/scripts/elastic_commands.py   | 266 +++++++++
 .../5.6.2/package/scripts/elastic_master.py     |  72 +++
 .../5.6.2/package/scripts/elastic_slave.py      |  71 +++
 .../5.6.2/package/scripts/params.py             | 108 ++++
 .../5.6.2/package/scripts/properties_config.py  |  34 ++
 .../5.6.2/package/scripts/service_check.py      | 114 ++++
 .../5.6.2/package/scripts/status_params.py      |  27 +
 .../templates/elasticsearch.master.yaml.j2      |  77 +++
 .../templates/elasticsearch.slave.yaml.j2       |  78 +++
 .../templates/elasticsearch_limits.conf.j2      |  20 +
 .../5.6.2/quicklinks/quicklinks.json            |  43 ++
 .../ELASTICSEARCH/5.6.2/role_command_order.json |   8 +
 .../KIBANA/5.6.2/configuration/kibana-env.xml   |  72 +++
 .../KIBANA/5.6.2/configuration/kibana-site.xml  | 113 ++++
 .../common-services/KIBANA/5.6.2/metainfo.xml   |  84 +++
 .../KIBANA/5.6.2/package/scripts/common.py      |  56 ++
 .../5.6.2/package/scripts/kibana_master.py      |  81 +++
 .../KIBANA/5.6.2/package/scripts/params.py      |  50 ++
 .../KIBANA/5.6.2/quicklinks/quicklinks.json     |  28 +
 .../src/main/resources/mpack.json               |  76 +++
 .../packaging/ambari/metron-mpack/README.md     |  20 +-
 .../packaging/ambari/metron-mpack/pom.xml       |   7 +-
 .../src/main/assemblies/metron-mpack.xml        |  14 -
 .../ELASTICSEARCH/5.6.2/metainfo.xml            |  29 -
 .../ELASTICSEARCH/5.6.2/repos/repoinfo.xml      |  45 --
 .../addon-services/KIBANA/5.6.2/metainfo.xml    |  30 -
 .../KIBANA/5.6.2/quicklinks/quicklinks.json     |  27 -
 .../KIBANA/5.6.2/repos/repoinfo.xml             |  60 --
 .../5.6.2/configuration/elastic-env.xml         |  86 ---
 .../5.6.2/configuration/elastic-jvm-options.xml | 144 -----
 .../5.6.2/configuration/elastic-site.xml        | 198 -------
 .../5.6.2/configuration/elastic-sysconfig.xml   |  97 ---
 .../5.6.2/configuration/elastic-systemd.xml     |  30 -
 .../ELASTICSEARCH/5.6.2/metainfo.xml            |  97 ---
 .../5.6.2/package/scripts/elastic_commands.py   | 266 ---------
 .../5.6.2/package/scripts/elastic_master.py     |  72 ---
 .../5.6.2/package/scripts/elastic_slave.py      |  71 ---
 .../5.6.2/package/scripts/params.py             | 108 ----
 .../5.6.2/package/scripts/properties_config.py  |  34 --
 .../5.6.2/package/scripts/service_check.py      | 114 ----
 .../5.6.2/package/scripts/status_params.py      |  27 -
 .../templates/elasticsearch.master.yaml.j2      |  77 ---
 .../templates/elasticsearch.slave.yaml.j2       |  78 ---
 .../templates/elasticsearch_limits.conf.j2      |  20 -
 .../5.6.2/quicklinks/quicklinks.json            |  43 --
 .../ELASTICSEARCH/5.6.2/role_command_order.json |   8 -
 .../KIBANA/5.6.2/configuration/kibana-env.xml   |  72 ---
 .../KIBANA/5.6.2/configuration/kibana-site.xml  | 113 ----
 .../common-services/KIBANA/5.6.2/metainfo.xml   |  94 ---
 .../KIBANA/5.6.2/package/scripts/common.py      |  56 --
 .../5.6.2/package/scripts/dashboard/__init__.py |  16 -
 .../scripts/dashboard/dashboard-bulkload.json   |  88 ---
 .../package/scripts/dashboard/dashboardindex.py |  95 ---
 .../package/scripts/dashboard/kibana.template   | 233 --------
 .../5.6.2/package/scripts/kibana_master.py      | 119 ----
 .../KIBANA/5.6.2/package/scripts/params.py      |  50 --
 .../KIBANA/5.6.2/quicklinks/quicklinks.json     |  28 -
 .../configuration/metron-enrichment-env.xml     | 130 +++-
 .../METRON/CURRENT/configuration/metron-env.xml |   2 -
 .../configuration/metron-profiler-env.xml       |  77 ++-
 .../CURRENT/configuration/metron-rest-env.xml   |  12 +
 .../common-services/METRON/CURRENT/metainfo.xml |  20 +
 .../package/scripts/dashboard/__init__.py       |  16 +
 .../scripts/dashboard/dashboard-bulkload.json   |  88 +++
 .../package/scripts/dashboard/dashboardindex.py |  95 +++
 .../package/scripts/dashboard/kibana.template   | 233 ++++++++
 .../package/scripts/enrichment_commands.py      |  20 +-
 .../package/scripts/enrichment_master.py        |  12 +-
 .../package/scripts/indexing_commands.py        |  43 +-
 .../CURRENT/package/scripts/indexing_master.py  |  32 +
 .../CURRENT/package/scripts/metron_service.py   |  10 +
 .../package/scripts/params/params_linux.py      |  33 +-
 .../package/scripts/params/status_params.py     |   7 +-
 .../CURRENT/package/scripts/rest_commands.py    |  69 ++-
 .../CURRENT/package/scripts/rest_master.py      |  16 +-
 .../enrichment-splitjoin.properties.j2          |  63 ++
 .../templates/enrichment-unified.properties.j2  |  60 ++
 .../package/templates/profiler.properties.j2    |  15 +-
 .../METRON/CURRENT/themes/metron_theme.json     | 275 ++++++++-
 .../packaging/docker/deb-docker/pom.xml         |   6 +
 .../docker/rpm-docker/SPECS/metron.spec         |  29 +-
 .../packaging/docker/rpm-docker/pom.xml         |   6 +
 .../packaging/packer-build/README.md            |   2 +-
 metron-deployment/pom.xml                       |   1 +
 .../alert-filters/alert-filters.e2e-spec.ts     |  11 +-
 .../meta-alerts/meta-alert.e2e-spec.ts          |   5 +-
 .../alerts-list/tree-view/tree-view.e2e-spec.ts |   5 +-
 .../alert-filters/alert-filters.component.ts    |   4 +-
 .../src/app/model/search-request.ts             |   2 +-
 .../app/shared/group-by/group-by.component.ts   |   4 +-
 metron-interface/metron-config/package.json     |   2 +-
 .../apache/metron/rest/model/AlertProfile.java  |  88 ---
 .../metron/rest/model/AlertsUIUserSettings.java |  90 +++
 metron-interface/metron-rest/README.md          | 172 +++---
 metron-interface/metron-rest/pom.xml            |   5 +
 .../apache/metron/rest/MetronRestConstants.java |   4 +
 .../apache/metron/rest/config/HBaseConfig.java  |  55 ++
 .../metron/rest/controller/AlertController.java | 119 ----
 .../rest/controller/AlertsUIController.java     | 124 ++++
 .../metron/rest/controller/UserController.java  |   3 +-
 .../rest/repository/AlertProfileRepository.java |  25 -
 .../metron/rest/service/AlertService.java       |  39 --
 .../rest/service/AlertsProfileService.java      |  32 -
 .../metron/rest/service/AlertsUIService.java    |  41 ++
 .../apache/metron/rest/service/UserService.java |  33 ++
 .../rest/service/impl/AlertServiceImpl.java     |  97 ---
 .../service/impl/AlertsProfileServiceImpl.java  |  66 ---
 .../rest/service/impl/AlertsUIServiceImpl.java  | 131 ++++
 .../rest/service/impl/SearchServiceImpl.java    |  22 +-
 .../src/main/resources/application-test.yml     |   2 +
 .../src/main/resources/application.yml          |   9 +-
 .../metron-rest/src/main/scripts/metron-rest.sh |   9 +
 .../metron/rest/config/HBaseConfigTest.java     |  69 +++
 .../apache/metron/rest/config/TestConfig.java   |  26 +-
 .../AlertControllerIntegrationTest.java         | 345 -----------
 .../AlertsUIControllerIntegrationTest.java      | 340 +++++++++++
 .../SearchControllerIntegrationTest.java        |  61 +-
 .../UserControllerIntegrationTest.java          |  40 +-
 .../rest/service/impl/AlertServiceImplTest.java | 152 -----
 .../service/impl/AlertsUIServiceImplTest.java   | 180 ++++++
 .../service/impl/SearchServiceImplTest.java     |  60 +-
 metron-platform/Performance-tuning-guide.md     | 259 +++++++-
 metron-platform/metron-common/README.md         |  32 +
 .../src/main/config/zookeeper/global.json       |   5 +-
 .../configuration/ConfigurationsUtils.java      | 123 +++-
 .../common/configuration/FieldTransformer.java  |   4 +-
 .../configuration/SensorParserConfig.java       |  15 +
 .../enrichment/handler/ConfigHandler.java       |   4 +
 .../configuration/profiler/ProfileConfig.java   | 159 ++++-
 .../profiler/ProfileResultExpressions.java      |   4 +-
 .../profiler/ProfileTriageExpressions.java      |   8 +
 .../configuration/profiler/ProfilerConfig.java  | 115 +++-
 .../transformation/FieldTransformations.java    |   1 +
 .../transformation/RenameTransformation.java    |  55 ++
 .../transformation/StellarTransformation.java   |   3 +-
 .../common/message/BytesFromPosition.java       |   4 +-
 .../message/JSONFromFieldByReference.java       |  37 ++
 .../metron/common/message/JSONFromPosition.java |   4 +-
 .../metron/common/message/MessageGetters.java   |   1 +
 .../apache/metron/common/utils/HDFSUtils.java   |  59 ++
 .../apache/metron/common/utils/JSONUtils.java   |  11 +-
 .../metron/common/utils/ReflectionUtils.java    |  66 ++-
 .../org/apache/metron/common/writer/test.json   |  31 -
 .../configurations/ProfilerUpdater.java         |   1 +
 .../src/main/scripts/cluster_info.py            | 389 ++++++++++++
 .../profiler/ProfileConfigTest.java             | 107 +++-
 .../profiler/ProfilerConfigTest.java            | 209 +++++++
 .../metron/common/error/MetronErrorTest.java    |  18 +-
 .../transformation/FieldTransformationTest.java |  17 +-
 .../RenameTransformationTest.java               |  99 ++++
 .../StellarTransformationTest.java              |  30 +
 .../ZKConfigurationsCacheIntegrationTest.java   |   4 +-
 .../elasticsearch/dao/ElasticsearchDao.java     |   2 +-
 .../dao/ElasticsearchSearchDao.java             |  30 +-
 .../elasticsearch/utils/ElasticsearchUtils.java | 107 +++-
 .../writer/ElasticsearchWriter.java             |   8 +-
 .../scripts/start_elasticsearch_topology.sh     |   8 +-
 .../writer/ElasticsearchWriterTest.java         |  19 +-
 .../metron-enrichment/Performance.md            | 514 ++++++++++++++++
 metron-platform/metron-enrichment/README.md     |  47 +-
 metron-platform/metron-enrichment/pom.xml       |   6 +
 .../main/config/enrichment-splitjoin.properties |  63 ++
 .../config/enrichment-splitjoin.properties.j2   |  63 ++
 .../main/config/enrichment-unified.properties   |  69 +++
 .../config/enrichment-unified.properties.j2     |  60 ++
 .../src/main/config/enrichment.properties       |  64 --
 .../src/main/config/enrichment.properties.j2    |  63 --
 .../main/flux/enrichment/remote-splitjoin.yaml  | 590 ++++++++++++++++++
 .../main/flux/enrichment/remote-unified.yaml    | 387 ++++++++++++
 .../src/main/flux/enrichment/remote.yaml        | 594 -------------------
 .../adapters/stellar/StellarAdapter.java        |   5 +-
 .../enrichment/bolt/EnrichmentJoinBolt.java     |   4 +-
 .../enrichment/bolt/EnrichmentSplitterBolt.java |   4 +-
 .../enrichment/bolt/GenericEnrichmentBolt.java  |  33 +-
 .../apache/metron/enrichment/bolt/JoinBolt.java |  34 +-
 .../enrichment/bolt/ThreatIntelJoinBolt.java    | 119 +---
 .../bolt/ThreatIntelSplitterBolt.java           |   4 +-
 .../enrichment/bolt/UnifiedEnrichmentBolt.java  | 412 +++++++++++++
 .../enrichment/parallel/ConcurrencyContext.java |  96 +++
 .../enrichment/parallel/EnrichmentCallable.java |  66 +++
 .../enrichment/parallel/EnrichmentContext.java  |  43 ++
 .../parallel/EnrichmentStrategies.java          | 108 ++++
 .../enrichment/parallel/EnrichmentStrategy.java |  71 +++
 .../enrichment/parallel/ParallelEnricher.java   | 289 +++++++++
 .../parallel/WorkerPoolStrategies.java          |  45 ++
 .../enrichment/utils/EnrichmentUtils.java       |  16 +
 .../enrichment/utils/ThreatIntelUtils.java      | 127 ++++
 .../main/scripts/start_enrichment_topology.sh   |  16 +-
 .../bolt/BulkMessageWriterBoltTest.java         |  25 +
 .../bolt/GenericEnrichmentBoltTest.java         |   2 +-
 .../metron/enrichment/bolt/JoinBoltTest.java    |   7 +-
 .../integration/EnrichmentIntegrationTest.java  | 113 ++--
 .../UnifiedEnrichmentIntegrationTest.java       |  96 +++
 .../parallel/ParallelEnricherTest.java          | 251 ++++++++
 .../unified_enrichment_arch.svg                 |  14 +
 .../unified_enrichment_arch_diagram.xml         |  14 +
 .../org/apache/metron/hbase/bolt/HBaseBolt.java |  22 +-
 .../metron/hbase/client/UserSettingsClient.java | 175 ++++++
 .../hbase/client/UserSettingsClientTest.java    | 101 ++++
 .../apache/metron/hbase/mock/MockHTable.java    |   7 +-
 metron-platform/metron-indexing/README.md       |  24 +
 .../src/main/flux/indexing/batch/remote.yaml    |   4 +-
 .../flux/indexing/random_access/remote.yaml     |   4 +-
 .../indexing/dao/search/SearchRequest.java      |  15 +-
 .../metron/indexing/dao/search/SortField.java   |  15 +
 .../apache/metron/indexing/dao/InMemoryDao.java |  18 +
 .../integration/components/KafkaComponent.java  |  39 +-
 .../jsonMapQuery/parsed/jsonMapExampleParsed    |   2 +
 .../data/jsonMapQuery/raw/jsonMapExampleOutput  |   1 +
 metron-platform/metron-management/pom.xml       |   1 -
 .../management/ConfigurationFunctions.java      | 564 ++++++++++--------
 .../management/ConfigurationFunctionsTest.java  | 424 +++++++++----
 metron-platform/metron-parsers/README.md        |  79 ++-
 metron-platform/metron-parsers/pom.xml          |   5 +
 .../config/zookeeper/parsers/jsonMapQuery.json  |   5 +
 .../apache/metron/parsers/bolt/ParserBolt.java  |  32 +-
 .../metron/parsers/json/JSONMapParser.java      | 145 +++--
 .../parsers/topology/ParserTopologyBuilder.java |   4 +-
 .../metron/parsers/bolt/ParserBoltTest.java     |  16 +-
 .../JSONMapQueryIntegrationTest.java            |  36 ++
 .../parsers/integration/ParserDriver.java       |   6 +-
 .../validation/SampleDataValidation.java        |   2 +-
 .../parsers/json/JSONMapParserQueryTest.java    | 201 +++++++
 .../apache/metron/solr/dao/SolrSearchDao.java   |  24 +-
 .../metron/solr/dao/SolrSearchDaoTest.java      |   8 +-
 .../metron/test/utils/ValidationUtils.java      |  46 +-
 .../writer/bolt/BulkMessageWriterBolt.java      |  51 +-
 metron-sensors/pycapa/README.md                 |  84 +--
 metron-stellar/stellar-common/pom.xml           |   5 +
 .../stellar/common/BaseStellarProcessor.java    |  31 +-
 .../stellar/common/CachingStellarProcessor.java | 144 +++++
 .../shell/DefaultStellarShellExecutor.java      |  34 +-
 .../shell/specials/AssignmentCommand.java       |   2 +-
 .../stellar/common/utils/ConversionUtils.java   |  19 +-
 .../common/utils/StellarProcessorUtils.java     | 135 +++--
 .../org/apache/metron/stellar/dsl/Context.java  |  43 +-
 .../stellar/dsl/functions/DateFunctions.java    |   8 +-
 .../resolver/ClasspathFunctionResolver.java     |  45 +-
 .../common/CachingStellarProcessorTest.java     | 104 ++++
 .../shell/DefaultStellarShellExecutorTest.java  |  11 +
 .../shell/specials/AssignmentCommandTest.java   |  14 +
 .../resolver/ClasspathFunctionResolverTest.java |  30 +
 metron-stellar/stellar-zeppelin/README.md       |  80 +--
 metron-stellar/stellar-zeppelin/pom.xml         |  12 +
 .../stellar/zeppelin/StellarInterpreter.java    |  95 +--
 .../zeppelin/StellarInterpreterProperty.java    |  79 +++
 .../StellarInterpreterPropertyTest.java         |  62 ++
 .../zeppelin/StellarInterpreterTest.java        |  60 +-
 .../integration/ConfigUploadComponent.java      |  82 +++
 .../StellarInterpreterIntegrationTest.java      | 104 ++++
 pom.xml                                         |   1 +
 site-book/pom.xml                               |   4 +-
 .../src-resources/templates/site.xml.template   |   6 +-
 site/community/index.md                         |   8 +-
 371 files changed, 20030 insertions(+), 7283 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/d0a4e4c0/dependencies_with_url.csv
----------------------------------------------------------------------
diff --cc dependencies_with_url.csv
index 2bf1c76,1e73eb1..0b4b16b
--- a/dependencies_with_url.csv
+++ b/dependencies_with_url.csv
@@@ -21,8 -21,10 +21,11 @@@ com.esotericsoftware:reflectasm:jar:1.1
  com.flipkart.zjsonpatch:zjsonpatch:jar:0.3.4:compile,Apache v2, https://github.com/flipkart-incubator/zjsonpatch
  com.google.protobuf:protobuf-java:jar:2.5.0:compile,New BSD license,http://code.google.com/p/protobuf
  com.google.protobuf:protobuf-java:jar:2.6.1:compile,New BSD license,http://code.google.com/p/protobuf
 +com.google.protobuf:protobuf-java:jar:3.1.0:compile,New BSD license,http://code.google.com/p/protobuf
  com.jcraft:jsch:jar:0.1.42:compile,BSD,http://www.jcraft.com/jsch/
+ com.jayway.jsonpath:json-path:jar:2.3.0:compile,Apache v2,https://github.com/json-path/JsonPath
+ net.minidev:accessors-smart:jar:1.2:compile,Apache v2,https://github.com/netplex/json-smart-v2
+ net.minidev:json-smart:jar:2.3:compile,Apache v2,https://github.com/netplex/json-smart-v2
  com.maxmind.db:maxmind-db:jar:1.2.1:compile,CC-BY-SA 3.0,https://github.com/maxmind/MaxMind-DB
  com.maxmind.geoip2:geoip2:jar:2.8.0:compile,Apache v2,https://github.com/maxmind/GeoIP2-java
  com.sun.xml.bind:jaxb-impl:jar:2.2.3-1:compile,CDDL,http://jaxb.java.net/

http://git-wip-us.apache.org/repos/asf/metron/blob/d0a4e4c0/metron-deployment/ansible/playbooks/metron_full_install.yml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/metron/blob/d0a4e4c0/metron-deployment/packaging/ambari/metron-mpack/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/metron/blob/d0a4e4c0/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-env.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/metron/blob/d0a4e4c0/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/metainfo.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/metron/blob/d0a4e4c0/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_commands.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/metron/blob/d0a4e4c0/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_master.py
----------------------------------------------------------------------
diff --cc metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_master.py
index e63ea2d,1cd6f4c..b5c4bb9
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_master.py
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_master.py
@@@ -166,34 -153,34 +170,62 @@@ class Indexing(Script)
                cmd.format(params.es_http_url, template_name),
                logoutput=True)
  
 +    def solr_schema_install(self, env):
 +        from params import params
 +        env.set_params(params)
 +        Logger.info("Installing Solr schemas")
 +
 +        commands = IndexingCommands(params)
 +        for collection_name, config_path in commands.get_solr_schemas().iteritems():
 +
 +            # install the schema
 +
 +            cmd = "{0}/bin/solr create -c {1} -d {2}"
 +            Execute(
 +                cmd.format(params.solr_home, collection_name, config_path),
 +                logoutput=True, user="solr")
 +
 +    def solr_schema_delete(self, env):
 +        from params import params
 +        env.set_params(params)
 +        Logger.info("Deleting Solr schemas")
 +
 +        commands = IndexingCommands(params)
 +        for collection_name, config_path in commands.get_solr_schemas().iteritems():
 +            # delete the schema
 +            cmd = "{0}/bin/solr delete -c {1}"
 +            Execute(
 +                cmd.format(params.solr_home, collection_name),
 +                logoutput=True, user="solr")
 +
+     @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+     def kibana_dashboard_install(self, env):
+       from params import params
+       env.set_params(params)
+ 
+       Logger.info("Connecting to Elasticsearch on: %s" % (params.es_http_url))
+ 
+       kibanaTemplate = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dashboard', 'kibana.template')
+       if not os.path.isfile(kibanaTemplate):
+         raise IOError(
+             errno.ENOENT, os.strerror(errno.ENOENT), kibanaTemplate)
+ 
+       Logger.info("Loading .kibana index template from %s" % kibanaTemplate)
+       template_cmd = ambari_format(
+           'curl -s -XPOST http://{es_http_url}/_template/.kibana -d @%s' % kibanaTemplate)
+       Execute(template_cmd, logoutput=True)
+ 
+       kibanaDashboardLoad = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dashboard', 'dashboard-bulkload.json')
+       if not os.path.isfile(kibanaDashboardLoad):
+         raise IOError(
+             errno.ENOENT, os.strerror(errno.ENOENT), kibanaDashboardLoad)
+ 
+       Logger.info("Loading .kibana dashboard from %s" % kibanaDashboardLoad)
+ 
+       kibana_cmd = ambari_format(
+           'curl -s -H "Content-Type: application/x-ndjson" -XPOST http://{es_http_url}/.kibana/_bulk --data-binary @%s' % kibanaDashboardLoad)
+       Execute(kibana_cmd, logoutput=True)
+ 
      def zeppelin_notebook_import(self, env):
          from params import params
          env.set_params(params)
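
The two curl commands in kibana_dashboard_install() amount to: install an
index template for the .kibana index, then bulk-load the dashboard documents
with the x-ndjson content type. Below is a hedged Python sketch of the same
two calls using the requests library; es_http_url and the file paths are
placeholders mirroring the code above, and requests itself is an assumption,
not what the mpack uses:

    # Sketch of the Kibana template install + dashboard bulk load.
    import requests

    es_http_url = "node1:9200"  # placeholder

    with open("dashboard/kibana.template", "rb") as f:
        requests.post("http://{0}/_template/.kibana".format(es_http_url),
                      data=f)

    with open("dashboard/dashboard-bulkload.json", "rb") as f:
        requests.post("http://{0}/.kibana/_bulk".format(es_http_url),
                      data=f,
                      headers={"Content-Type": "application/x-ndjson"})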

http://git-wip-us.apache.org/repos/asf/metron/blob/d0a4e4c0/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/metron_service.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/metron/blob/d0a4e4c0/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/params_linux.py
----------------------------------------------------------------------
diff --cc metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/params_linux.py
index 5bfa1dc,f44d05f..6f4760b
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/params_linux.py
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/params_linux.py
@@@ -84,7 -83,10 +83,11 @@@ indexing_hbase_configured_flag_file = s
  indexing_hbase_acl_configured_flag_file = status_params.indexing_hbase_acl_configured_flag_file
  indexing_hdfs_perm_configured_flag_file = status_params.indexing_hdfs_perm_configured_flag_file
  elasticsearch_template_installed_flag_file = status_params.elasticsearch_template_installed_flag_file
 +solr_schema_installed_flag_file = status_params.solr_schema_installed_flag_file
+ rest_kafka_configured_flag_file = status_params.rest_kafka_configured_flag_file
+ rest_kafka_acl_configured_flag_file = status_params.rest_kafka_acl_configured_flag_file
+ rest_hbase_configured_flag_file = status_params.rest_hbase_configured_flag_file
+ rest_hbase_acl_configured_flag_file = status_params.rest_hbase_acl_configured_flag_file
  global_properties_template = config['configurations']['metron-env']['elasticsearch-properties']
  
  # Elasticsearch hosts and port management

http://git-wip-us.apache.org/repos/asf/metron/blob/d0a4e4c0/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/status_params.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/metron/blob/d0a4e4c0/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/rest_commands.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/metron/blob/d0a4e4c0/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/themes/metron_theme.json
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/metron/blob/d0a4e4c0/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/metron/blob/d0a4e4c0/metron-interface/metron-rest/src/main/scripts/metron-rest.sh
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/metron/blob/d0a4e4c0/metron-platform/metron-common/src/main/java/org/apache/metron/common/utils/JSONUtils.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/metron/blob/d0a4e4c0/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchDao.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/metron/blob/d0a4e4c0/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchSearchDao.java
----------------------------------------------------------------------
diff --cc metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchSearchDao.java
index 5e9ed02,0000000..3971237
mode 100644,000000..100644
--- a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchSearchDao.java
+++ b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchSearchDao.java
@@@ -1,565 -1,0 +1,567 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.metron.elasticsearch.dao;
 +
 +import static org.apache.metron.elasticsearch.utils.ElasticsearchUtils.INDEX_NAME_DELIMITER;
 +
 +import com.google.common.base.Splitter;
 +import com.google.common.collect.Iterables;
 +import java.io.IOException;
 +import java.lang.invoke.MethodHandles;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collection;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Optional;
 +import java.util.function.Function;
 +import org.apache.metron.elasticsearch.utils.ElasticsearchUtils;
 +import org.apache.metron.indexing.dao.AccessConfig;
 +import org.apache.metron.indexing.dao.search.FieldType;
 +import org.apache.metron.indexing.dao.search.GetRequest;
 +import org.apache.metron.indexing.dao.search.Group;
 +import org.apache.metron.indexing.dao.search.GroupOrder;
 +import org.apache.metron.indexing.dao.search.GroupOrderType;
 +import org.apache.metron.indexing.dao.search.GroupRequest;
 +import org.apache.metron.indexing.dao.search.GroupResponse;
 +import org.apache.metron.indexing.dao.search.GroupResult;
 +import org.apache.metron.indexing.dao.search.InvalidSearchException;
 +import org.apache.metron.indexing.dao.search.SearchDao;
 +import org.apache.metron.indexing.dao.search.SearchRequest;
 +import org.apache.metron.indexing.dao.search.SearchResponse;
 +import org.apache.metron.indexing.dao.search.SearchResult;
 +import org.apache.metron.indexing.dao.search.SortField;
 +import org.apache.metron.indexing.dao.search.SortOrder;
 +import org.apache.metron.indexing.dao.update.Document;
 +import org.elasticsearch.action.search.SearchRequestBuilder;
 +import org.elasticsearch.client.transport.TransportClient;
 +import org.elasticsearch.index.mapper.LegacyIpFieldMapper;
 +import org.elasticsearch.index.query.IdsQueryBuilder;
 +import org.elasticsearch.index.query.QueryBuilder;
 +import org.elasticsearch.index.query.QueryBuilders;
 +import org.elasticsearch.index.query.QueryStringQueryBuilder;
 +import org.elasticsearch.search.SearchHit;
 +import org.elasticsearch.search.SearchHits;
 +import org.elasticsearch.search.aggregations.Aggregation;
 +import org.elasticsearch.search.aggregations.AggregationBuilders;
 +import org.elasticsearch.search.aggregations.Aggregations;
 +import org.elasticsearch.search.aggregations.bucket.terms.Terms;
 +import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
 +import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
 +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
 +import org.elasticsearch.search.aggregations.metrics.sum.Sum;
 +import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder;
 +import org.elasticsearch.search.builder.SearchSourceBuilder;
 +import org.elasticsearch.search.sort.FieldSortBuilder;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +public class ElasticsearchSearchDao implements SearchDao {
 +
 +  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 +
 +  /**
 +   * The value required to ensure that Elasticsearch sorts missing values last.
 +   */
 +  private static final String SORT_MISSING_LAST = "_last";
 +
 +  /**
 +   * The value required to ensure that Elasticsearch sorts missing values last.
 +   */
 +  private static final String SORT_MISSING_FIRST = "_first";
 +
 +  private transient TransportClient client;
 +  private AccessConfig accessConfig;
 +  private ElasticsearchColumnMetadataDao columnMetadataDao;
 +  private ElasticsearchRequestSubmitter requestSubmitter;
 +
 +  public ElasticsearchSearchDao(TransportClient client,
 +      AccessConfig accessConfig,
 +      ElasticsearchColumnMetadataDao columnMetadataDao,
 +      ElasticsearchRequestSubmitter requestSubmitter) {
 +    this.client = client;
 +    this.accessConfig = accessConfig;
 +    this.columnMetadataDao = columnMetadataDao;
 +    this.requestSubmitter = requestSubmitter;
 +  }
 +
 +  @Override
 +  public SearchResponse search(SearchRequest searchRequest) throws InvalidSearchException {
 +    if(searchRequest.getQuery() == null) {
 +      throw new InvalidSearchException("Search query is invalid: null");
 +    }
 +    return search(searchRequest, new QueryStringQueryBuilder(searchRequest.getQuery()));
 +  }
 +
 +  @Override
 +  public GroupResponse group(GroupRequest groupRequest) throws InvalidSearchException {
 +    return group(groupRequest, new QueryStringQueryBuilder(groupRequest.getQuery()));
 +  }
 +
 +  @Override
 +  public Document getLatest(String guid, String sensorType) throws IOException {
 +    Optional<Document> doc = searchByGuid(guid, sensorType, hit -> toDocument(guid, hit));
 +    return doc.orElse(null);
 +  }
 +
 +  <T> Optional<T> searchByGuid(String guid, String sensorType,
 +      Function<SearchHit, Optional<T>> callback) {
 +    Collection<String> sensorTypes = sensorType != null ? Collections.singleton(sensorType) : null;
 +    List<T> results = searchByGuids(Collections.singleton(guid), sensorTypes, callback);
 +    if (results.size() > 0) {
 +      return Optional.of(results.get(0));
 +    } else {
 +      return Optional.empty();
 +    }
 +  }
 +
 +  @Override
 +  public Iterable<Document> getAllLatest(List<GetRequest> getRequests) throws IOException {
 +    Collection<String> guids = new HashSet<>();
 +    Collection<String> sensorTypes = new HashSet<>();
 +    for (GetRequest getRequest: getRequests) {
 +      guids.add(getRequest.getGuid());
 +      sensorTypes.add(getRequest.getSensorType());
 +    }
 +    List<Document> documents = searchByGuids(
 +        guids
 +        , sensorTypes
 +        , hit -> {
 +          Long ts = 0L;
 +          String doc = hit.getSourceAsString();
 +          String sourceType = Iterables.getFirst(Splitter.on("_doc").split(hit.getType()), null);
 +          try {
 +            return Optional.of(new Document(doc, hit.getId(), sourceType, ts));
 +          } catch (IOException e) {
 +            throw new IllegalStateException("Unable to retrieve latest: " + e.getMessage(), e);
 +          }
 +        }
 +
 +    );
 +    return documents;
 +  }
 +
 +  /**
 +   * Defers to a provided {@link org.elasticsearch.index.query.QueryBuilder} for the query.
 +   * @param request The request defining the parameters of the search
 +   * @param queryBuilder The actual query to be run. Intended for if the SearchRequest requires wrapping
 +   * @return The results of the query
 +   * @throws InvalidSearchException When the query is malformed or the current state doesn't allow search
 +   */
 +  protected SearchResponse search(SearchRequest request, QueryBuilder queryBuilder) throws InvalidSearchException {
 +    org.elasticsearch.action.search.SearchRequest esRequest;
 +    org.elasticsearch.action.search.SearchResponse esResponse;
 +
 +    if(client == null) {
 +      throw new InvalidSearchException("Uninitialized Dao!  You must call init() prior to use.");
 +    }
 +
 +    if (request.getSize() > accessConfig.getMaxSearchResults()) {
 +      throw new InvalidSearchException("Search result size must be less than " + accessConfig.getMaxSearchResults());
 +    }
 +
 +    esRequest = buildSearchRequest(request, queryBuilder);
 +    esResponse = requestSubmitter.submitSearch(esRequest);
 +    return buildSearchResponse(request, esResponse);
 +  }
 +
 +  /**
 +   * Builds an Elasticsearch search request.
 +   * @param searchRequest The Metron search request.
 +   * @param queryBuilder The query to execute.
 +   * @return An Elasticsearch search request.
 +   */
 +  private org.elasticsearch.action.search.SearchRequest buildSearchRequest(
 +      SearchRequest searchRequest,
 +      QueryBuilder queryBuilder) throws InvalidSearchException {
 +    if (LOG.isDebugEnabled()) {
 +      LOG.debug("Got search request; request={}", ElasticsearchUtils.toJSON(searchRequest).orElse("???"));
 +    }
 +    SearchSourceBuilder searchBuilder = new SearchSourceBuilder()
 +        .size(searchRequest.getSize())
 +        .from(searchRequest.getFrom())
 +        .query(queryBuilder)
 +        .trackScores(true);
-     Optional<List<String>> fields = searchRequest.getFields();
++    List<String> fields = searchRequest.getFields();
 +    // column metadata needed to understand the type of each sort field
 +    Map<String, FieldType> meta;
 +    try {
 +      meta = columnMetadataDao.getColumnMetadata(searchRequest.getIndices());
 +    } catch(IOException e) {
 +      throw new InvalidSearchException("Unable to get column metadata", e);
 +    }
 +
 +    // handle sort fields
 +    for(SortField sortField : searchRequest.getSort()) {
 +
 +      // what type is the sort field?
 +      FieldType sortFieldType = meta.getOrDefault(sortField.getField(), FieldType.OTHER);
 +
 +      // missing values sort last on a descending sort and first on an ascending sort
 +      org.elasticsearch.search.sort.SortOrder sortOrder = getElasticsearchSortOrder(sortField.getSortOrder());
 +      String missingSortOrder;
 +      if(sortOrder == org.elasticsearch.search.sort.SortOrder.DESC) {
 +        missingSortOrder = SORT_MISSING_LAST;
 +      } else {
 +        missingSortOrder = SORT_MISSING_FIRST;
 +      }
 +
 +      // sort by the field, positioning missing values as determined above
 +      FieldSortBuilder sortBy = new FieldSortBuilder(sortField.getField())
 +          .order(sortOrder)
 +          .missing(missingSortOrder)
 +          .unmappedType(sortFieldType.getFieldType());
 +      searchBuilder.sort(sortBy);
 +    }
 +
 +    // handle search fields; when specific fields are requested, the full source is fetched
 +    // here and filtered down to those fields later in getSearchResult
-     if (fields.isPresent()) {
++    if (fields != null) {
 +      searchBuilder.fetchSource("*", null);
 +    } else {
 +      searchBuilder.fetchSource(true);
 +    }
 +
-     Optional<List<String>> facetFields = searchRequest.getFacetFields();
++    List<String> facetFields = searchRequest.getFacetFields();
 +
 +    // handle facet fields
-     if (searchRequest.getFacetFields().isPresent()) {
++    if (facetFields != null) {
 +      // https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/_bucket_aggregations.html
-       for(String field : searchRequest.getFacetFields().get()) {
++      for(String field : facetFields) {
 +        String name = getFacetAggregationName(field);
 +        TermsAggregationBuilder terms = AggregationBuilders.terms(name).field(field);
 +        searchBuilder.aggregation(terms);
 +      }
 +    }
 +
 +    // return the search request
 +    String[] indices = wildcardIndices(searchRequest.getIndices());
 +    if (LOG.isDebugEnabled()) {
 +      LOG.debug("Built Elasticsearch request; indices={}, request={}", indices, searchBuilder.toString());
 +    }
 +    return new org.elasticsearch.action.search.SearchRequest()
 +        .indices(indices)
 +        .source(searchBuilder);
 +  }
 +
 +  /**
 +   * Builds a search response.
 +   *
 +   * This effectively transforms an Elasticsearch search response into a Metron search response.
 +   *
 +   * @param searchRequest The Metron search request.
 +   * @param esResponse The Elasticsearch search response.
 +   * @return A Metron search response.
 +   * @throws InvalidSearchException If the column metadata for the indices cannot be retrieved.
 +   */
 +  private SearchResponse buildSearchResponse(
 +      SearchRequest searchRequest,
 +      org.elasticsearch.action.search.SearchResponse esResponse) throws InvalidSearchException {
 +
 +    SearchResponse searchResponse = new SearchResponse();
 +
 +    searchResponse.setTotal(esResponse.getHits().getTotalHits());
 +
 +    // search hits --> search results
 +    List<SearchResult> results = new ArrayList<>();
 +    for(SearchHit hit: esResponse.getHits().getHits()) {
 +      results.add(getSearchResult(hit, searchRequest.getFields()));
 +    }
 +    searchResponse.setResults(results);
 +
 +    // handle facet fields
-     if (searchRequest.getFacetFields().isPresent()) {
-       List<String> facetFields = searchRequest.getFacetFields().get();
++    if (searchRequest.getFacetFields() != null) {
++      List<String> facetFields = searchRequest.getFacetFields();
 +      Map<String, FieldType> commonColumnMetadata;
 +      try {
 +        commonColumnMetadata = columnMetadataDao.getColumnMetadata(searchRequest.getIndices());
 +      } catch (IOException e) {
 +        throw new InvalidSearchException(String.format(
 +            "Could not get common column metadata for indices %s",
 +            Arrays.toString(searchRequest.getIndices().toArray())), e);
 +      }
 +      searchResponse.setFacetCounts(getFacetCounts(facetFields, esResponse.getAggregations(), commonColumnMetadata ));
 +    }
 +
 +    if (LOG.isDebugEnabled()) {
 +      LOG.debug("Built search response; response={}", ElasticsearchUtils.toJSON(searchResponse).orElse("???"));
 +    }
 +    return searchResponse;
 +  }
 +
 +  private org.elasticsearch.search.sort.SortOrder getElasticsearchSortOrder(
 +      org.apache.metron.indexing.dao.search.SortOrder sortOrder) {
 +    return sortOrder == org.apache.metron.indexing.dao.search.SortOrder.DESC ?
 +        org.elasticsearch.search.sort.SortOrder.DESC : org.elasticsearch.search.sort.SortOrder.ASC;
 +  }
 +
 +  private String getFacetAggregationName(String field) {
 +    return String.format("%s_count", field);
 +  }
 +
 +  private String[] wildcardIndices(List<String> indices) {
 +    if(indices == null)
 +      return new String[] {};
 +
 +    return indices
 +        .stream()
 +        .map(index -> String.format("%s%s*", index, INDEX_NAME_DELIMITER))
 +        .toArray(String[]::new);
 +  }
 +
-   private SearchResult getSearchResult(SearchHit searchHit, Optional<List<String>> fields) {
++  private SearchResult getSearchResult(SearchHit searchHit, List<String> fields) {
 +    SearchResult searchResult = new SearchResult();
 +    searchResult.setId(searchHit.getId());
 +    Map<String, Object> source;
-     if (fields.isPresent()) {
++    if (fields != null) {
 +      Map<String, Object> resultSourceAsMap = searchHit.getSourceAsMap();
 +      source = new HashMap<>();
-       fields.get().forEach(field -> {
++      fields.forEach(field -> {
 +        source.put(field, resultSourceAsMap.get(field));
 +      });
 +    } else {
 +      source = searchHit.getSource();
 +    }
 +    searchResult.setSource(source);
 +    searchResult.setScore(searchHit.getScore());
 +    searchResult.setIndex(searchHit.getIndex());
 +    return searchResult;
 +  }
 +
 +  private Map<String, Map<String, Long>> getFacetCounts(List<String> fields, Aggregations aggregations, Map<String, FieldType> commonColumnMetadata) {
 +    Map<String, Map<String, Long>> fieldCounts = new HashMap<>();
 +    for (String field: fields) {
 +      Map<String, Long> valueCounts = new HashMap<>();
-       Aggregation aggregation = aggregations.get(getFacetAggregationName(field));
-       if (aggregation instanceof Terms) {
-         Terms terms = (Terms) aggregation;
-         terms.getBuckets().stream().forEach(bucket -> valueCounts.put(formatKey(bucket.getKey(), commonColumnMetadata.get(field)), bucket.getDocCount()));
++      if(aggregations != null ) {
++        Aggregation aggregation = aggregations.get(getFacetAggregationName(field));
++        if (aggregation instanceof Terms) {
++          Terms terms = (Terms) aggregation;
++          terms.getBuckets().stream().forEach(bucket -> valueCounts.put(formatKey(bucket.getKey(), commonColumnMetadata.get(field)), bucket.getDocCount()));
++        }
 +      }
 +      fieldCounts.put(field, valueCounts);
 +    }
 +    return fieldCounts;
 +  }
 +
 +  private String formatKey(Object key, FieldType type) {
 +    if (FieldType.IP.equals(type) && key instanceof Long) {
 +      return LegacyIpFieldMapper.longToIp((Long) key);
 +    } else if (FieldType.BOOLEAN.equals(type)) {
 +      return (Long) key == 1 ? "true" : "false";
 +    } else {
 +      return key.toString();
 +    }
 +  }
 +
 +  /**
 +   * Defers to a provided {@link org.elasticsearch.index.query.QueryBuilder} for the query.
 +   * @param groupRequest The request defining the parameters of the grouping
 +   * @param queryBuilder The actual query to be run. Intended for cases where the SearchRequest requires wrapping.
 +   * @return The results of the query
 +   * @throws InvalidSearchException When the query is malformed or the current state doesn't allow search
 +   */
 +  protected GroupResponse group(GroupRequest groupRequest, QueryBuilder queryBuilder)
 +      throws InvalidSearchException {
 +    org.elasticsearch.action.search.SearchRequest esRequest;
 +    org.elasticsearch.action.search.SearchResponse esResponse;
 +
 +    if (client == null) {
 +      throw new InvalidSearchException("Uninitialized Dao!  You must call init() prior to use.");
 +    }
 +    if (groupRequest.getGroups() == null || groupRequest.getGroups().size() == 0) {
 +      throw new InvalidSearchException("At least 1 group must be provided.");
 +    }
 +
 +    esRequest = buildGroupRequest(groupRequest, queryBuilder);
 +    esResponse = requestSubmitter.submitSearch(esRequest);
 +    GroupResponse response = buildGroupResponse(groupRequest, esResponse);
 +
 +    return response;
 +  }
 +
 +  /**
 +   * Builds a group search request.
 +   * @param groupRequest The Metron group request.
 +   * @param queryBuilder The search query.
 +   * @return An Elasticsearch search request.
 +   */
 +  private org.elasticsearch.action.search.SearchRequest buildGroupRequest(
 +      GroupRequest groupRequest,
 +      QueryBuilder queryBuilder) {
 +
 +    // handle groups
 +    TermsAggregationBuilder groups = getGroupsTermBuilder(groupRequest, 0);
 +    final SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder()
 +        .query(queryBuilder)
 +        .aggregation(groups);
 +
 +    // return the search request
 +    String[] indices = wildcardIndices(groupRequest.getIndices());
 +    return new org.elasticsearch.action.search.SearchRequest()
 +        .indices(indices)
 +        .source(searchSourceBuilder);
 +  }
 +
 +  private TermsAggregationBuilder getGroupsTermBuilder(GroupRequest groupRequest, int index) {
 +    List<Group> groups = groupRequest.getGroups();
 +    Group group = groups.get(index);
 +    String aggregationName = getGroupByAggregationName(group.getField());
 +    TermsAggregationBuilder termsBuilder = AggregationBuilders.terms(aggregationName);
 +    termsBuilder
 +        .field(group.getField())
 +        .size(accessConfig.getMaxSearchGroups())
 +        .order(getElasticsearchGroupOrder(group.getOrder()));
 +    if (index < groups.size() - 1) {
 +      termsBuilder.subAggregation(getGroupsTermBuilder(groupRequest, index + 1));
 +    }
 +    Optional<String> scoreField = groupRequest.getScoreField();
 +    if (scoreField.isPresent()) {
 +      SumAggregationBuilder scoreSumAggregationBuilder = AggregationBuilders.sum(getSumAggregationName(scoreField.get())).field(scoreField.get()).missing(0);
 +      termsBuilder.subAggregation(scoreSumAggregationBuilder);
 +    }
 +    return termsBuilder;
 +  }
 +
 +  private String getGroupByAggregationName(String field) {
 +    return String.format("%s_group", field);
 +  }
 +
 +  private String getSumAggregationName(String field) {
 +    return String.format("%s_score", field);
 +  }
 +
 +  private Order getElasticsearchGroupOrder(GroupOrder groupOrder) {
 +    if (groupOrder.getGroupOrderType() == GroupOrderType.TERM) {
 +      return groupOrder.getSortOrder() == SortOrder.ASC ? Order.term(true) : Order.term(false);
 +    } else {
 +      return groupOrder.getSortOrder() == SortOrder.ASC ? Order.count(true) : Order.count(false);
 +    }
 +  }
 +
 +  /**
 +   * Build a group response.
 +   * @param groupRequest The original group request.
 +   * @param response The search response.
 +   * @return A group response.
 +   * @throws InvalidSearchException If the column metadata for the indices cannot be retrieved.
 +   */
 +  private GroupResponse buildGroupResponse(
 +      GroupRequest groupRequest,
 +      org.elasticsearch.action.search.SearchResponse response) throws InvalidSearchException {
 +
 +    // build the search response
 +    Map<String, FieldType> commonColumnMetadata;
 +    try {
 +      commonColumnMetadata = columnMetadataDao.getColumnMetadata(groupRequest.getIndices());
 +    } catch (IOException e) {
 +      throw new InvalidSearchException(String.format("Could not get common column metadata for indices %s",
 +          Arrays.toString(groupRequest.getIndices().toArray())), e);
 +    }
 +
 +    GroupResponse groupResponse = new GroupResponse();
 +    groupResponse.setGroupedBy(groupRequest.getGroups().get(0).getField());
 +    groupResponse.setGroupResults(getGroupResults(groupRequest, 0, response.getAggregations(), commonColumnMetadata));
 +    return groupResponse;
 +  }
 +
 +  private List<GroupResult> getGroupResults(GroupRequest groupRequest, int index, Aggregations aggregations, Map<String, FieldType> commonColumnMetadata) {
 +    List<Group> groups = groupRequest.getGroups();
 +    String field = groups.get(index).getField();
 +    Terms terms = aggregations.get(getGroupByAggregationName(field));
 +    List<GroupResult> searchResultGroups = new ArrayList<>();
 +    for(Bucket bucket: terms.getBuckets()) {
 +      GroupResult groupResult = new GroupResult();
 +      groupResult.setKey(formatKey(bucket.getKey(), commonColumnMetadata.get(field)));
 +      groupResult.setTotal(bucket.getDocCount());
 +      Optional<String> scoreField = groupRequest.getScoreField();
 +      if (scoreField.isPresent()) {
 +        Sum score = bucket.getAggregations().get(getSumAggregationName(scoreField.get()));
 +        groupResult.setScore(score.getValue());
 +      }
 +      if (index < groups.size() - 1) {
 +        groupResult.setGroupedBy(groups.get(index + 1).getField());
 +        groupResult.setGroupResults(getGroupResults(groupRequest, index + 1, bucket.getAggregations(), commonColumnMetadata));
 +      }
 +      searchResultGroups.add(groupResult);
 +    }
 +    return searchResultGroups;
 +  }
 +
 +  /**
 +   * Returns the search hits for the given GUIDs and sensor types.
 +   * A callback transforms each hit into a type T; hits for which the
 +   * callback returns an empty Optional are omitted from the results.
 +   */
 +  <T> List<T> searchByGuids(Collection<String> guids, Collection<String> sensorTypes,
 +      Function<SearchHit, Optional<T>> callback) {
 +    if(guids == null || guids.isEmpty()) {
 +      return Collections.emptyList();
 +    }
 +    IdsQueryBuilder idsQuery;
 +    if (sensorTypes != null) {
 +      String[] types = sensorTypes.stream().map(sensorType -> sensorType + "_doc").toArray(String[]::new);
 +      idsQuery = QueryBuilders.idsQuery(types);
 +    } else {
 +      idsQuery = QueryBuilders.idsQuery();
 +    }
 +
 +    for(String guid : guids) {
 +      idsQuery.addIds(guid);
 +    }
 +    QueryBuilder query = idsQuery;
 +
 +    SearchRequestBuilder request = client.prepareSearch()
 +        .setQuery(query)
 +        .setSize(guids.size());
 +    org.elasticsearch.action.search.SearchResponse response = request.get();
 +    SearchHits hits = response.getHits();
 +    List<T> results = new ArrayList<>();
 +    for (SearchHit hit : hits) {
 +      Optional<T> result = callback.apply(hit);
 +      if (result.isPresent()) {
 +        results.add(result.get());
 +      }
 +    }
 +    return results;
 +  }
 +
 +  private Optional<Document> toDocument(final String guid, SearchHit hit) {
 +    Long ts = 0L;
 +    String doc = hit.getSourceAsString();
 +    String sourceType = toSourceType(hit.getType());
 +    try {
 +      return Optional.of(new Document(doc, guid, sourceType, ts));
 +    } catch (IOException e) {
 +      throw new IllegalStateException("Unable to retrieve latest: " + e.getMessage(), e);
 +    }
 +  }
 +
 +  /**
 +   * Returns the source type based on a given doc type.
 +   * @param docType The document type.
 +   * @return The source type.
 +   */
 +  private String toSourceType(String docType) {
 +    return Iterables.getFirst(Splitter.on("_doc").split(docType), null);
 +  }
 +}
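
For reference, the sort handling above reduces to a small mapping from the
Metron sort order to an Elasticsearch field sort with explicit missing-value
placement. A minimal sketch, assuming the Elasticsearch 5.x client is on the
classpath and that SORT_MISSING_FIRST/SORT_MISSING_LAST are the usual ES
sentinels "_first"/"_last" (class and method names here are illustrative):

  import org.elasticsearch.search.sort.FieldSortBuilder;
  import org.elasticsearch.search.sort.SortOrder;

  public class SortSketch {
    // Descending sorts push documents missing the field to the end;
    // ascending sorts surface them first, matching the DAO above.
    public static FieldSortBuilder sortWithMissing(String field, boolean descending, String unmappedType) {
      SortOrder order = descending ? SortOrder.DESC : SortOrder.ASC;
      String missing = descending ? "_last" : "_first";
      return new FieldSortBuilder(field)
          .order(order)
          .missing(missing)
          .unmappedType(unmappedType);
    }
  }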

http://git-wip-us.apache.org/repos/asf/metron/blob/d0a4e4c0/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrSearchDao.java
----------------------------------------------------------------------
diff --cc metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrSearchDao.java
index e336037,0000000..272b96a
mode 100644,000000..100644
--- a/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrSearchDao.java
+++ b/metron-platform/metron-solr/src/main/java/org/apache/metron/solr/dao/SolrSearchDao.java
@@@ -1,317 -1,0 +1,317 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.metron.solr.dao;
 +
 +import com.fasterxml.jackson.core.JsonProcessingException;
 +import org.apache.metron.common.Constants;
 +import org.apache.metron.common.utils.JSONUtils;
 +import org.apache.metron.indexing.dao.AccessConfig;
 +import org.apache.metron.indexing.dao.search.GetRequest;
 +import org.apache.metron.indexing.dao.search.Group;
 +import org.apache.metron.indexing.dao.search.GroupOrder;
 +import org.apache.metron.indexing.dao.search.GroupOrderType;
 +import org.apache.metron.indexing.dao.search.GroupRequest;
 +import org.apache.metron.indexing.dao.search.GroupResponse;
 +import org.apache.metron.indexing.dao.search.GroupResult;
 +import org.apache.metron.indexing.dao.search.InvalidSearchException;
 +import org.apache.metron.indexing.dao.search.SearchDao;
 +import org.apache.metron.indexing.dao.search.SearchRequest;
 +import org.apache.metron.indexing.dao.search.SearchResponse;
 +import org.apache.metron.indexing.dao.search.SearchResult;
 +import org.apache.metron.indexing.dao.search.SortField;
 +import org.apache.metron.indexing.dao.search.SortOrder;
 +import org.apache.metron.indexing.dao.update.Document;
 +import org.apache.solr.client.solrj.SolrClient;
 +import org.apache.solr.client.solrj.SolrQuery;
 +import org.apache.solr.client.solrj.SolrQuery.ORDER;
 +import org.apache.solr.client.solrj.SolrServerException;
 +import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 +import org.apache.solr.client.solrj.response.FacetField;
 +import org.apache.solr.client.solrj.response.FacetField.Count;
 +import org.apache.solr.client.solrj.response.PivotField;
 +import org.apache.solr.client.solrj.response.QueryResponse;
 +import org.apache.solr.common.SolrDocument;
 +import org.apache.solr.common.SolrDocumentList;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import java.io.IOException;
 +import java.lang.invoke.MethodHandles;
 +import java.util.ArrayList;
 +import java.util.Collection;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Optional;
 +import java.util.stream.Collectors;
 +
 +import static org.apache.metron.common.Constants.SENSOR_TYPE;
 +
 +public class SolrSearchDao implements SearchDao {
 +
 +  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 +
 +  private transient SolrClient client;
 +  private AccessConfig accessConfig;
 +
 +  public SolrSearchDao(SolrClient client, AccessConfig accessConfig) {
 +    this.client = client;
 +    this.accessConfig = accessConfig;
 +  }
 +
 +  @Override
 +  public SearchResponse search(SearchRequest searchRequest) throws InvalidSearchException {
 +    if (searchRequest.getQuery() == null) {
 +      throw new InvalidSearchException("Search query is invalid: null");
 +    }
 +    if (client == null) {
 +      throw new InvalidSearchException("Uninitialized Dao!  You must call init() prior to use.");
 +    }
 +    if (searchRequest.getSize() > accessConfig.getMaxSearchResults()) {
 +      throw new InvalidSearchException(
 +          "Search result size must be less than " + accessConfig.getMaxSearchResults());
 +    }
 +    try {
 +      SolrQuery query = buildSearchRequest(searchRequest);
 +      QueryResponse response = client.query(query);
 +      return buildSearchResponse(searchRequest, response);
 +    } catch (IOException | SolrServerException e) {
 +      String msg = e.getMessage();
 +      LOG.error(msg, e);
 +      throw new InvalidSearchException(msg, e);
 +    }
 +  }
 +
 +  @Override
 +  public GroupResponse group(GroupRequest groupRequest) throws InvalidSearchException {
 +    try {
 +      String groupNames = groupRequest.getGroups().stream().map(Group::getField).collect(
 +          Collectors.joining(","));
 +      SolrQuery query = new SolrQuery()
 +          .setStart(0)
 +          .setRows(0)
 +          .setQuery(groupRequest.getQuery());
 +      query.set("collection", getCollections(groupRequest.getIndices()));
 +      Optional<String> scoreField = groupRequest.getScoreField();
 +      if (scoreField.isPresent()) {
 +        query.set("stats", true);
 +        query.set("stats.field", String.format("{!tag=piv1 sum=true}%s", scoreField.get()));
 +      }
 +      query.set("facet", true);
 +      query.set("facet.pivot", String.format("{!stats=piv1}%s", groupNames));
 +      QueryResponse response = client.query(query);
 +      return buildGroupResponse(groupRequest, response);
 +    } catch (IOException | SolrServerException e) {
 +      String msg = e.getMessage();
 +      LOG.error(msg, e);
 +      throw new InvalidSearchException(msg, e);
 +    }
 +  }
 +
 +  @Override
 +  public Document getLatest(String guid, String collection) throws IOException {
 +    try {
 +      SolrDocument solrDocument = client.getById(collection, guid);
 +      return toDocument(solrDocument);
 +    } catch (SolrServerException e) {
 +      throw new IOException(e);
 +    }
 +  }
 +
 +  @Override
 +  public Iterable<Document> getAllLatest(List<GetRequest> getRequests) throws IOException {
 +    Map<String, Collection<String>> collectionIdMap = new HashMap<>();
 +    for (GetRequest getRequest: getRequests) {
 +      Collection<String> ids = collectionIdMap.getOrDefault(getRequest.getSensorType(), new HashSet<>());
 +      ids.add(getRequest.getGuid());
 +      collectionIdMap.put(getRequest.getSensorType(), ids);
 +    }
 +    try {
 +      List<Document> documents = new ArrayList<>();
 +      for (String collection: collectionIdMap.keySet()) {
 +        SolrDocumentList solrDocumentList = client.getById(collectionIdMap.get(collection),
 +            new SolrQuery().set("collection", collection));
 +        documents.addAll(solrDocumentList.stream().map(this::toDocument).collect(Collectors.toList()));
 +      }
 +      return documents;
 +    } catch (SolrServerException e) {
 +      throw new IOException(e);
 +    }
 +  }
 +
 +  protected SolrQuery buildSearchRequest(
 +      SearchRequest searchRequest) throws IOException, SolrServerException {
 +    SolrQuery query = new SolrQuery()
 +        .setStart(searchRequest.getFrom())
 +        .setRows(searchRequest.getSize())
 +        .setQuery(searchRequest.getQuery());
 +
 +    // handle sort fields
 +    for (SortField sortField : searchRequest.getSort()) {
 +      query.addSort(sortField.getField(), getSolrSortOrder(sortField.getSortOrder()));
 +    }
 +
 +    // handle search fields
-     Optional<List<String>> fields = searchRequest.getFields();
-     if (fields.isPresent()) {
-       fields.get().forEach(query::addField);
++    List<String> fields = searchRequest.getFields();
++    if (fields != null) {
++      fields.forEach(query::addField);
 +    }
 +
 +    //handle facet fields
-     Optional<List<String>> facetFields = searchRequest.getFacetFields();
-     if (facetFields.isPresent()) {
-       facetFields.get().forEach(query::addFacetField);
++    List<String> facetFields = searchRequest.getFacetFields();
++    if (facetFields != null) {
++      facetFields.forEach(query::addFacetField);
 +    }
 +
 +    query.set("collection", getCollections(searchRequest.getIndices()));
 +
 +    return query;
 +  }
 +
 +  private String getCollections(List<String> indices) throws IOException, SolrServerException {
 +    List<String> existingCollections = CollectionAdminRequest.listCollections(client);
 +    return indices.stream().filter(existingCollections::contains).collect(Collectors.joining(","));
 +  }
 +
 +  private SolrQuery.ORDER getSolrSortOrder(
 +      SortOrder sortOrder) {
 +    return sortOrder == SortOrder.DESC ?
 +        ORDER.desc : ORDER.asc;
 +  }
 +
 +  protected SearchResponse buildSearchResponse(
 +      SearchRequest searchRequest,
 +      QueryResponse solrResponse) {
 +
 +    SearchResponse searchResponse = new SearchResponse();
 +    SolrDocumentList solrDocumentList = solrResponse.getResults();
 +    searchResponse.setTotal(solrDocumentList.getNumFound());
 +
 +    // search hits --> search results
 +    List<SearchResult> results = solrDocumentList.stream()
 +        .map(solrDocument -> getSearchResult(solrDocument, searchRequest.getFields()))
 +        .collect(Collectors.toList());
 +    searchResponse.setResults(results);
 +
 +    // handle facet fields
-     Optional<List<String>> facetFields = searchRequest.getFacetFields();
-     if (facetFields.isPresent()) {
-       searchResponse.setFacetCounts(getFacetCounts(facetFields.get(), solrResponse));
++    List<String> facetFields = searchRequest.getFacetFields();
++    if (facetFields != null) {
++      searchResponse.setFacetCounts(getFacetCounts(facetFields, solrResponse));
 +    }
 +
 +    if (LOG.isDebugEnabled()) {
 +      String response;
 +      try {
 +        response = JSONUtils.INSTANCE.toJSON(searchResponse, false);
 +      } catch (JsonProcessingException e) {
 +        response = e.getMessage();
 +      }
 +      LOG.debug("Built search response; response={}", response);
 +    }
 +    return searchResponse;
 +  }
 +
-   protected SearchResult getSearchResult(SolrDocument solrDocument, Optional<List<String>> fields) {
++  protected SearchResult getSearchResult(SolrDocument solrDocument, List<String> fields) {
 +    SearchResult searchResult = new SearchResult();
 +    searchResult.setId((String) solrDocument.getFieldValue(Constants.GUID));
 +    final Map<String, Object> source = new HashMap<>();
-     if (fields.isPresent()) {
-       fields.get().forEach(field -> source.put(field, solrDocument.getFieldValue(field)));
++    if (fields != null) {
++      fields.forEach(field -> source.put(field, solrDocument.getFieldValue(field)));
 +    } else {
 +      solrDocument.getFieldNames().forEach(field -> source.put(field, solrDocument.getFieldValue(field)));
 +    }
 +    searchResult.setSource(source);
 +    return searchResult;
 +  }
 +
 +  protected Map<String, Map<String, Long>> getFacetCounts(List<String> fields,
 +      QueryResponse solrResponse) {
 +    Map<String, Map<String, Long>> fieldCounts = new HashMap<>();
 +    for (String field : fields) {
 +      Map<String, Long> valueCounts = new HashMap<>();
 +      FacetField facetField = solrResponse.getFacetField(field);
 +      for (Count facetCount : facetField.getValues()) {
 +        valueCounts.put(facetCount.getName(), facetCount.getCount());
 +      }
 +      fieldCounts.put(field, valueCounts);
 +    }
 +    return fieldCounts;
 +  }
 +
 +  /**
 +   * Build a group response.
 +   * @param groupRequest The original group request.
 +   * @param response The search response.
 +   * @return A group response.
 +   */
 +  protected GroupResponse buildGroupResponse(
 +      GroupRequest groupRequest,
 +      QueryResponse response) {
 +    String groupNames = groupRequest.getGroups().stream().map(Group::getField).collect(
 +        Collectors.joining(","));
 +    List<PivotField> pivotFields = response.getFacetPivot().get(groupNames);
 +    GroupResponse groupResponse = new GroupResponse();
 +    groupResponse.setGroupedBy(groupRequest.getGroups().get(0).getField());
 +    groupResponse.setGroupResults(getGroupResults(groupRequest, 0, pivotFields));
 +    return groupResponse;
 +  }
 +
 +  protected List<GroupResult> getGroupResults(GroupRequest groupRequest, int index, List<PivotField> pivotFields) {
 +    List<Group> groups = groupRequest.getGroups();
 +    List<GroupResult> searchResultGroups = new ArrayList<>();
 +    final GroupOrder groupOrder = groups.get(index).getOrder();
 +    pivotFields.sort((o1, o2) -> {
 +      String s1 = groupOrder.getGroupOrderType() == GroupOrderType.TERM ?
 +          o1.getValue().toString() : Integer.toString(o1.getCount());
 +      String s2 = groupOrder.getGroupOrderType() == GroupOrderType.TERM ?
 +          o2.getValue().toString() : Integer.toString(o2.getCount());
 +      if (groupOrder.getSortOrder() == SortOrder.ASC) {
 +        return s1.compareTo(s2);
 +      } else {
 +        return s2.compareTo(s1);
 +      }
 +    });
 +
 +    for(PivotField pivotField: pivotFields) {
 +      GroupResult groupResult = new GroupResult();
 +      groupResult.setKey(pivotField.getValue().toString());
 +      groupResult.setTotal(pivotField.getCount());
 +      Optional<String> scoreField = groupRequest.getScoreField();
 +      if (scoreField.isPresent()) {
 +        groupResult.setScore((Double) pivotField.getFieldStatsInfo().get(scoreField.get()).getSum());
 +      }
 +      if (index < groups.size() - 1) {
 +        groupResult.setGroupedBy(groups.get(index + 1).getField());
 +        groupResult.setGroupResults(getGroupResults(groupRequest, index + 1, pivotField.getPivot()));
 +      }
 +      searchResultGroups.add(groupResult);
 +    }
 +    return searchResultGroups;
 +  }
 +
 +  protected Document toDocument(SolrDocument solrDocument) {
 +    Map<String, Object> document = new HashMap<>();
 +    solrDocument.getFieldNames().stream()
 +        .filter(name -> !name.equals(SolrDao.VERSION_FIELD))
 +        .forEach(name -> document.put(name, solrDocument.getFieldValue(name)));
 +    return new Document(document,
 +        (String) solrDocument.getFieldValue(Constants.GUID),
 +        (String) solrDocument.getFieldValue(SENSOR_TYPE), 0L);
 +  }
 +}
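
For reference, the grouped query that group() builds above amounts to a Solr
pivot facet with an attached stats component. A minimal SolrJ sketch (names
are illustrative; "piv1" is just the tag that ties the stats to the pivot):

  import org.apache.solr.client.solrj.SolrQuery;

  public class GroupQuerySketch {
    public static SolrQuery groupQuery(String q, String scoreField, String... groupFields) {
      SolrQuery query = new SolrQuery()
          .setStart(0)
          .setRows(0)          // buckets only; no documents
          .setQuery(q);
      // sum the score per pivot bucket via the stats component
      query.set("stats", true);
      query.set("stats.field", String.format("{!tag=piv1 sum=true}%s", scoreField));
      // nested grouping: one pivot over the comma-joined group fields
      query.set("facet", true);
      query.set("facet.pivot", String.format("{!stats=piv1}%s", String.join(",", groupFields)));
      return query;
    }
  }

For example, groupQuery("*:*", "score", "ip_src_addr", "protocol") requests a
two-level pivot (ip_src_addr, then protocol within it) with a summed score per bucket.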


[04/50] [abbrv] metron git commit: METRON-1299 In MetronError tests, don't test for HostName if getHostName wouldn't work closes apache/incubator-metron#924

Posted by rm...@apache.org.
METRON-1299 In MetronError tests, don't test for HostName if getHostName wouldn't work closes apache/incubator-metron#924


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/85d12475
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/85d12475
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/85d12475

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 85d12475cf363f0b9d6bdd973ec9860550f73980
Parents: e69ce21
Author: ottobackwards <ot...@gmail.com>
Authored: Thu Mar 15 14:13:44 2018 -0400
Committer: cstella <ce...@gmail.com>
Committed: Thu Mar 15 14:13:44 2018 -0400

----------------------------------------------------------------------
 .../metron/common/error/MetronErrorTest.java      | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/85d12475/metron-platform/metron-common/src/test/java/org/apache/metron/common/error/MetronErrorTest.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/test/java/org/apache/metron/common/error/MetronErrorTest.java b/metron-platform/metron-common/src/test/java/org/apache/metron/common/error/MetronErrorTest.java
index 5e505a8..e7390de 100644
--- a/metron-platform/metron-common/src/test/java/org/apache/metron/common/error/MetronErrorTest.java
+++ b/metron-platform/metron-common/src/test/java/org/apache/metron/common/error/MetronErrorTest.java
@@ -18,15 +18,16 @@
 package org.apache.metron.common.error;
 
 import com.google.common.collect.Sets;
-import com.google.common.primitives.Bytes;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.metron.common.Constants;
 import org.json.simple.JSONObject;
 import org.junit.Before;
 import org.junit.Test;
 
+import java.net.InetAddress;
+import java.net.UnknownHostException;
 import java.util.Arrays;
 
-import static org.apache.metron.common.Constants.ErrorFields.RAW_MESSAGE;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -53,7 +54,18 @@ public class MetronErrorTest {
     assertEquals(Constants.ErrorType.PARSER_ERROR.getType(), errorJSON.get(Constants.ErrorFields.ERROR_TYPE.getName()));
     assertEquals("error", errorJSON.get(Constants.SENSOR_TYPE));
     assertEquals("sensorType", errorJSON.get(Constants.ErrorFields.FAILED_SENSOR_TYPE.getName()));
-    assertTrue(((String) errorJSON.get(Constants.ErrorFields.HOSTNAME.getName())).length() > 0);
+
+    String hostName = null;
+    try {
+      hostName = InetAddress.getLocalHost().getHostName();
+    } catch (UnknownHostException uhe) {
+      // unable to get the hostname on this machine, don't test it
+    }
+
+    if (!StringUtils.isEmpty(hostName)) {
+      assertTrue(((String) errorJSON.get(Constants.ErrorFields.HOSTNAME.getName())).length() > 0);
+      assertEquals(hostName, (String) errorJSON.get(Constants.ErrorFields.HOSTNAME.getName()));
+    }
     assertTrue(((long) errorJSON.get(Constants.ErrorFields.TIMESTAMP.getName())) > 0);
   }
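
The guard above generalizes to any assertion that depends on the local
environment. A minimal sketch of the same idea, assuming nothing beyond the
JDK (the helper name is illustrative):

  import java.net.InetAddress;
  import java.net.UnknownHostException;
  import java.util.Optional;

  public class HostNameSketch {
    // Empty when the local hostname cannot be resolved, so callers can
    // skip hostname-dependent assertions instead of failing the build.
    public static Optional<String> localHostName() {
      try {
        return Optional.of(InetAddress.getLocalHost().getHostName());
      } catch (UnknownHostException e) {
        return Optional.empty();
      }
    }
  }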
 


[38/50] [abbrv] metron git commit: METRON-1515: Errors loading stellar functions currently bomb the entire topology, they should be recoverable closes apache/incubator-metron#985

Posted by rm...@apache.org.
METRON-1515: Errors loading stellar functions currently bomb the entire topology, they should be recoverable closes apache/incubator-metron#985


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/3fcbf8b4
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/3fcbf8b4
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/3fcbf8b4

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 3fcbf8b4e4e38f9c50842b8af857092b091c7c40
Parents: 1d3e7fc
Author: cstella <ce...@gmail.com>
Authored: Mon Apr 16 15:12:11 2018 -0400
Committer: cstella <ce...@gmail.com>
Committed: Mon Apr 16 15:12:11 2018 -0400

----------------------------------------------------------------------
 .../resolver/ClasspathFunctionResolver.java     | 45 +++++++++++++++-----
 .../resolver/ClasspathFunctionResolverTest.java | 30 +++++++++++++
 2 files changed, 65 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/3fcbf8b4/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/dsl/functions/resolver/ClasspathFunctionResolver.java
----------------------------------------------------------------------
diff --git a/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/dsl/functions/resolver/ClasspathFunctionResolver.java b/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/dsl/functions/resolver/ClasspathFunctionResolver.java
index 85aa015..b17233a 100644
--- a/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/dsl/functions/resolver/ClasspathFunctionResolver.java
+++ b/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/dsl/functions/resolver/ClasspathFunctionResolver.java
@@ -34,6 +34,7 @@ import org.apache.metron.stellar.dsl.Context;
 import org.apache.metron.stellar.dsl.Stellar;
 import org.apache.metron.stellar.dsl.StellarFunction;
 
+import org.atteo.classindex.ClassFilter;
 import org.atteo.classindex.ClassIndex;
 import org.reflections.util.FilterBuilder;
 
@@ -219,6 +220,17 @@ public class ClasspathFunctionResolver extends BaseFunctionResolver {
     }
   }
 
+  protected Iterable<Class<?>> getStellarClasses(ClassLoader cl) {
+    return ClassIndex.getAnnotated(Stellar.class, cl);
+  }
+
+  protected boolean includeClass(Class<?> c, FilterBuilder filterBuilder)
+  {
+    boolean isAssignable = StellarFunction.class.isAssignableFrom(c);
+    boolean isFiltered = filterBuilder.apply(c.getCanonicalName());
+    return isAssignable && isFiltered;
+  }
+
   /**
    * Returns a set of classes that should undergo further interrogation for resolution
    * (aka discovery) of Stellar functions.
@@ -254,16 +266,29 @@ public class ClasspathFunctionResolver extends BaseFunctionResolver {
     Set<String> classes = new HashSet<>();
     Set<Class<? extends StellarFunction>> ret = new HashSet<>();
     for(ClassLoader cl : cls) {
-      for(Class<?> c : ClassIndex.getAnnotated(Stellar.class, cl)) {
-        LOG.debug("{}: Found class: {}", cl.getClass().getCanonicalName(), c.getCanonicalName());
-        boolean isAssignable = StellarFunction.class.isAssignableFrom(c);
-        boolean isFiltered = filterBuilder.apply(c.getCanonicalName());
-        if( isAssignable && isFiltered ) {
-          String className = c.getName();
-          if(!classes.contains(className)) {
-            LOG.debug("{}: Added class: {}", cl.getClass().getCanonicalName(), className);
-            ret.add((Class<? extends StellarFunction>) c);
-            classes.add(className);
+      for(Class<?> c : getStellarClasses(cl)) {
+        try {
+          LOG.debug("{}: Found class: {}", cl.getClass().getCanonicalName(), c.getCanonicalName());
+          if (includeClass(c, filterBuilder)) {
+            String className = c.getName();
+            if (!classes.contains(className)) {
+              LOG.debug("{}: Added class: {}", cl.getClass().getCanonicalName(), className);
+              ret.add((Class<? extends StellarFunction>) c);
+              classes.add(className);
+            }
+          }
+        }
+        catch(Error le) {
+          //we have had some error loading a stellar function.  This could mean that
+          //the classpath is unstable (e.g. old copies of jars are on the classpath).
+          try {
+            LOG.error("Skipping class " + c.getName() + ": " + le.getMessage()
+                    + ", please check that there are not old versions of stellar functions on the classpath.", le);
+          }
+          catch(Error ie) {
+            //it's possible that getName() will throw an exception if the class is VERY malformed.
+            LOG.error("Skipping class: " + le.getMessage()
+                    + ", please check that there are not old versions of stellar functions on the classpath.", le);
           }
         }
       }
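
The recovery logic above boils down to tolerating per-class linkage failures
during classpath scanning. A minimal generic sketch of that pattern (names
are illustrative, not Metron APIs):

  import java.util.HashSet;
  import java.util.Set;

  public class SafeScanSketch {
    public static <T> Set<Class<? extends T>> resolveSafely(Iterable<Class<?>> candidates, Class<T> iface) {
      Set<Class<? extends T>> resolved = new HashSet<>();
      for (Class<?> c : candidates) {
        try {
          // inspecting a candidate can throw LinkageError (e.g.
          // NoClassDefFoundError) when stale jars are on the classpath
          if (iface.isAssignableFrom(c)) {
            resolved.add(c.asSubclass(iface));
          }
        } catch (Error e) {
          // skip this class and keep resolving the rest
        }
      }
      return resolved;
    }
  }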

http://git-wip-us.apache.org/repos/asf/metron/blob/3fcbf8b4/metron-stellar/stellar-common/src/test/java/org/apache/metron/stellar/dsl/functions/resolver/ClasspathFunctionResolverTest.java
----------------------------------------------------------------------
diff --git a/metron-stellar/stellar-common/src/test/java/org/apache/metron/stellar/dsl/functions/resolver/ClasspathFunctionResolverTest.java b/metron-stellar/stellar-common/src/test/java/org/apache/metron/stellar/dsl/functions/resolver/ClasspathFunctionResolverTest.java
index 1d37f99..cc5bc7c 100644
--- a/metron-stellar/stellar-common/src/test/java/org/apache/metron/stellar/dsl/functions/resolver/ClasspathFunctionResolverTest.java
+++ b/metron-stellar/stellar-common/src/test/java/org/apache/metron/stellar/dsl/functions/resolver/ClasspathFunctionResolverTest.java
@@ -18,20 +18,27 @@
 
 package org.apache.metron.stellar.dsl.functions.resolver;
 
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 import org.apache.commons.vfs2.FileSystemException;
 import org.apache.metron.stellar.dsl.Context;
+import org.apache.metron.stellar.dsl.StellarFunction;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
+import org.reflections.util.FilterBuilder;
 
 import java.io.File;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Properties;
+import java.util.Set;
 
 import static org.apache.metron.stellar.dsl.functions.resolver.ClasspathFunctionResolver.Config.*;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 public class ClasspathFunctionResolverTest {
 
@@ -121,4 +128,27 @@ public class ClasspathFunctionResolverTest {
     Assert.assertTrue(functions.contains("NOW"));
   }
 
+  @Test
+  public void testInvalidStellarClass() throws Exception {
+    StellarFunction goodFunc = mock(StellarFunction.class);
+    StellarFunction badFunc = mock(StellarFunction.class);
+    ClasspathFunctionResolver resolver = new ClasspathFunctionResolver() {
+      @Override
+      protected Iterable<Class<?>> getStellarClasses(ClassLoader cl) {
+        return ImmutableList.of(goodFunc.getClass(), badFunc.getClass());
+      }
+
+      @Override
+      protected boolean includeClass(Class<?> c, FilterBuilder filterBuilder) {
+        if(c != goodFunc.getClass()) {
+          throw new LinkageError("failed!");
+        }
+        return true;
+      }
+    };
+    Set<Class<? extends StellarFunction>> funcs = resolver.resolvables();
+    Assert.assertEquals(1, funcs.size());
+    Assert.assertEquals(goodFunc.getClass(), Iterables.getFirst(funcs, null));
+  }
+
 }
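
The test above works because the resolver exposes getStellarClasses and
includeClass as protected seams. The same technique in miniature, assuming
nothing beyond the JDK (names are illustrative):

  import java.util.ArrayList;
  import java.util.Collections;
  import java.util.List;

  public class Scanner {
    // override in tests to inject a fixed candidate list
    protected Iterable<Class<?>> candidates() {
      return Collections.emptyList();
    }

    // override in tests to simulate a per-class linkage failure
    protected boolean include(Class<?> c) {
      return true;
    }

    public final List<Class<?>> scan() {
      List<Class<?>> out = new ArrayList<>();
      for (Class<?> c : candidates()) {
        try {
          if (include(c)) {
            out.add(c);
          }
        } catch (Error skipped) {
          // tolerate the failure, mirroring the resolver's recovery path
        }
      }
      return out;
    }
  }

A test can then subclass Scanner, return a fixed candidate list, and throw
LinkageError from include(...) to exercise the recovery path without needing
a genuinely broken classpath.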


[23/50] [abbrv] metron git commit: METRON-1462: Separate ES and Kibana from Metron Mpack (mmiklavc via mmiklavc) closes apache/metron#943

Posted by rm...@apache.org.
http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/params.py b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/params.py
new file mode 100755
index 0000000..24f2306
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/params.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script import Script
+
+def yamlify_variables(var):
+  if isinstance(var, bool):
+    return str(var).lower()
+  else:
+    return var
+
+# server configurations
+config = Script.get_config()
+
+masters_also_are_datanodes = config['configurations']['elastic-site']['masters_also_are_datanodes']
+elastic_home = config['configurations']['elastic-sysconfig']['elastic_home']
+data_dir = config['configurations']['elastic-sysconfig']['data_dir']
+work_dir = config['configurations']['elastic-sysconfig']['work_dir']
+conf_dir = config['configurations']['elastic-sysconfig']['conf_dir']
+heap_size = config['configurations']['elastic-sysconfig']['heap_size']
+max_open_files = config['configurations']['elastic-sysconfig']['max_open_files']
+max_map_count = config['configurations']['elastic-sysconfig']['max_map_count']
+
+elastic_user = config['configurations']['elastic-env']['elastic_user']
+elastic_group = config['configurations']['elastic-env']['elastic_group']
+log_dir = config['configurations']['elastic-env']['elastic_log_dir']
+pid_dir = config['configurations']['elastic-env']['elastic_pid_dir']
+
+hostname = config['hostname']
+java64_home = config['hostLevelParams']['java_home']
+elastic_env_sh_template = config['configurations']['elastic-env']['content']
+sysconfig_template = config['configurations']['elastic-sysconfig']['content']
+
+cluster_name = config['configurations']['elastic-site']['cluster_name']
+zen_discovery_ping_unicast_hosts = config['configurations']['elastic-site']['zen_discovery_ping_unicast_hosts']
+
+path_data = config['configurations']['elastic-site']['path_data']
+http_cors_enabled = config['configurations']['elastic-site']['http_cors_enabled']
+http_port = config['configurations']['elastic-site']['http_port']
+transport_tcp_port = config['configurations']['elastic-site']['transport_tcp_port']
+
+recover_after_time = config['configurations']['elastic-site']['recover_after_time']
+gateway_recover_after_data_nodes = config['configurations']['elastic-site']['gateway_recover_after_data_nodes']
+expected_data_nodes = config['configurations']['elastic-site']['expected_data_nodes']
+index_merge_scheduler_max_thread_count = config['configurations']['elastic-site']['index_merge_scheduler_max_thread_count']
+index_translog_flush_threshold_size = config['configurations']['elastic-site']['index_translog_flush_threshold_size']
+index_refresh_interval = config['configurations']['elastic-site']['index_refresh_interval']
+indices_memory_index_store_throttle_type = config['configurations']['elastic-site']['indices_memory_index_store_throttle_type']
+index_number_of_shards = config['configurations']['elastic-site']['index_number_of_shards']
+index_number_of_replicas = config['configurations']['elastic-site']['index_number_of_replicas']
+indices_memory_index_buffer_size = config['configurations']['elastic-site']['indices_memory_index_buffer_size']
+bootstrap_memory_lock = yamlify_variables(config['configurations']['elastic-site']['bootstrap_memory_lock'])
+threadpool_bulk_queue_size = config['configurations']['elastic-site']['threadpool_bulk_queue_size']
+cluster_routing_allocation_node_concurrent_recoveries = config['configurations']['elastic-site']['cluster_routing_allocation_node_concurrent_recoveries']
+cluster_routing_allocation_disk_watermark_low = config['configurations']['elastic-site']['cluster_routing_allocation_disk_watermark_low']
+cluster_routing_allocation_disk_threshold_enabled = yamlify_variables(config['configurations']['elastic-site']['cluster_routing_allocation_disk_threshold_enabled'])
+cluster_routing_allocation_disk_watermark_high = config['configurations']['elastic-site']['cluster_routing_allocation_disk_watermark_high']
+indices_fielddata_cache_size = config['configurations']['elastic-site']['indices_fielddata_cache_size']
+indices_cluster_send_refresh_mapping = yamlify_variables(config['configurations']['elastic-site']['indices_cluster_send_refresh_mapping'])
+threadpool_index_queue_size = config['configurations']['elastic-site']['threadpool_index_queue_size']
+
+discovery_zen_ping_timeout = config['configurations']['elastic-site']['discovery_zen_ping_timeout']
+discovery_zen_fd_ping_interval = config['configurations']['elastic-site']['discovery_zen_fd_ping_interval']
+discovery_zen_fd_ping_timeout = config['configurations']['elastic-site']['discovery_zen_fd_ping_timeout']
+discovery_zen_fd_ping_retries = config['configurations']['elastic-site']['discovery_zen_fd_ping_retries']
+
+network_host = config['configurations']['elastic-site']['network_host']
+network_publish_host = config['configurations']['elastic-site']['network_publish_host']
+
+limits_conf_dir = "/etc/security/limits.d"
+limits_conf_file = limits_conf_dir + "/elasticsearch.conf"
+elastic_user_nofile_limit = config['configurations']['elastic-env']['elastic_user_nofile_limit']
+elastic_user_nproc_limit = config['configurations']['elastic-env']['elastic_user_nproc_limit']
+elastic_user_memlock_soft_limit = config['configurations']['elastic-env']['elastic_user_memlock_soft_limit']
+elastic_user_memlock_hard_limit = config['configurations']['elastic-env']['elastic_user_memlock_hard_limit']
+
+# the status check (service elasticsearch status) cannot be run by the 'elasticsearch'
+# user due to the default permissions that are set when the package is installed.  the
+# status check must be run as root
+elastic_status_check_user = 'root'
+
+# when using the RPM or Debian packages on systems that use systemd, system limits
+# must be specified via systemd.
+# see https://www.elastic.co/guide/en/elasticsearch/reference/5.6/setting-system-settings.html#systemd
+systemd_parent_dir = '/etc/systemd/system/'
+systemd_elasticsearch_dir = systemd_parent_dir + 'elasticsearch.service.d/'
+systemd_override_file = systemd_elasticsearch_dir + 'override.conf'
+systemd_override_template = config['configurations']['elastic-systemd']['content']
+
+heap_size = config['configurations']['elastic-jvm-options']['heap_size']
+jvm_options_template = config['configurations']['elastic-jvm-options']['content']

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/properties_config.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/properties_config.py b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/properties_config.py
new file mode 100755
index 0000000..ef9f6dd
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/properties_config.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core.resources.system import File
+from resource_management.core.source import InlineTemplate
+from resource_management.libraries.functions.format import format
+
+
+def properties_inline_template(configurations):
+    return InlineTemplate('''{% for key, value in configurations_dict.items() %}{{ key }}={{ value }}
+{% endfor %}''', configurations_dict=configurations)
+
+
+def properties_config(filename, configurations=None, conf_dir=None,
+                      mode=None, owner=None, group=None, brokerid=None):
+    config_content = properties_inline_template(configurations)
+    File(format("{conf_dir}/{filename}"), content=config_content, owner=owner,
+         group=group, mode=mode)

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/service_check.py b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/service_check.py
new file mode 100755
index 0000000..3ac7c83
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/service_check.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from __future__ import print_function
+
+import subprocess
+import sys
+import re
+
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.script import Script
+from resource_management.core.logger import Logger
+
+class ServiceCheck(Script):
+
+    def service_check(self, env):
+        import params
+        env.set_params(params)
+        Logger.info("Running Elasticsearch service check")
+
+        port = self.get_port_from_range(params.http_port)
+        self.check_cluster_health(params.hostname, port)
+        self.index_document(params.hostname, port)
+
+        Logger.info("Elasticsearch service check successful")
+        sys.exit(0)
+
+    def index_document(self, host, port, doc='{"name": "Ambari Service Check"}', index="ambari_service_check"):
+        """
+        Tests the health of Elasticsearch by indexing a document.
+
+        :param host: The name of a host running Elasticsearch.
+        :param port: The Elasticsearch HTTP port.
+        :param doc: The test document to put.
+        :param index: The name of the test index.
+        """
+        # put a document into a new index
+        Execute("curl -XPUT 'http://%s:%s/%s/test/1' -d '%s'" % (host, port, index, doc), logoutput=True)
+
+        # retrieve the document...  use subprocess because we actually need the results here.
+        cmd_retrieve = "curl -XGET 'http://%s:%s/%s/test/1'" % (host, port, index)
+        proc = subprocess.Popen(cmd_retrieve, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+        (stdout, stderr) = proc.communicate()
+        response_retrieve = stdout
+        Logger.info("Retrieval response is: %s" % response_retrieve)
+        expected_retrieve = '{"_index":"%s","_type":"test","_id":"1","_version":1,"found":true,"_source":%s}' \
+            % (index, doc)
+
+        # delete the test index
+        cmd_delete = "curl -XDELETE 'http://%s:%s/%s'" % (host, port, index)
+        proc = subprocess.Popen(cmd_delete, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+        (stdout, stderr) = proc.communicate()
+        response_delete = stdout
+        Logger.info("Delete index response is: %s" % response_retrieve)
+        expected_delete = '{"acknowledged":true}'
+
+        if (expected_retrieve == response_retrieve) and (expected_delete == response_delete):
+            Logger.info("Successfully indexed document in Elasticsearch")
+        else:
+            Logger.info("Unable to retrieve document from Elasticsearch")
+            sys.exit(1)
+
+    def check_cluster_health(self, host, port, status="green", timeout="120s"):
+        """
+        Checks Elasticsearch cluster health.  Will wait for a given health
+        state to be reached.
+
+        :param host: The name of a host running Elasticsearch.
+        :param port: The Elasticsearch HTTP port.
+        :param status: The expected cluster health state.  By default, green.
+        :param timeout: How long to wait for the cluster.  By default, 120 seconds.
+        """
+        Logger.info("Checking cluster health")
+
+        cmd = "curl -sS -XGET 'http://{0}:{1}/_cluster/health?wait_for_status={2}&timeout={3}' | grep '\"status\":\"{2}\"'"
+        Execute(cmd.format(host, port, status, timeout), logoutput=True, tries=5, try_sleep=10)
+
+    def get_port_from_range(self, port_range, delimiter="-", default="9200"):
+        """
+        Elasticsearch is configured with a range of ports to bind to, such as
+        9200-9300.  This function identifies a single port within the given range.
+
+        :param port_range: A range of ports that Elasticsearch binds to.
+        :param delimiter: The port range delimiter, by default "-".
+        :param default: If no port can be identified in the port_range, the default is returned.
+        :return: A single port within the given range.
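+
+        Example: get_port_from_range("9200-9300") returns "9200".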
+        """
+        port = default
+        if delimiter in port_range:
+            # use the first port in the range, e.g. "9200" from "9200-9300"
+            port = port_range.split(delimiter)[0]
+
+        return port
+
+
+if __name__ == "__main__":
+    ServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/status_params.py b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/status_params.py
new file mode 100755
index 0000000..0629735
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/status_params.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions import format
+from resource_management.libraries.script import Script
+
+config = Script.get_config()
+
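+# location of the Elasticsearch PID file, used for process status checks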
+elastic_pid_dir = config['configurations']['elastic-env']['elastic_pid_dir']
+elastic_pid_file = format("{elastic_pid_dir}/elasticsearch.pid")
+elastic_user = config['configurations']['elastic-env']['elastic_user']

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch.master.yaml.j2
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch.master.yaml.j2 b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch.master.yaml.j2
new file mode 100755
index 0000000..8e20ba2
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch.master.yaml.j2
@@ -0,0 +1,77 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
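+{# Elasticsearch configuration for master-eligible nodes; whether masters also
+   store data is controlled by the masters_also_are_datanodes property. #}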
+cluster:
+  name: {{cluster_name}}
+  routing:
+    allocation.node_concurrent_recoveries: {{cluster_routing_allocation_node_concurrent_recoveries}}
+    allocation.disk.watermark.low: {{cluster_routing_allocation_disk_watermark_low}}
+    allocation.disk.threshold_enabled: {{cluster_routing_allocation_disk_threshold_enabled}}
+    allocation.disk.watermark.high: {{cluster_routing_allocation_disk_watermark_high}}
+
+discovery:
+  zen:
+    ping:
+      unicast:
+        hosts: {{zen_discovery_ping_unicast_hosts}}
+
+node:
+  data: {{ masters_also_are_datanodes }}
+  master: true
+  name: {{hostname}}
+path:
+  data: {{path_data}}
+
+http:
+  port: {{http_port}}
+  cors.enabled: {{http_cors_enabled}}
+
+
+transport:
+  tcp:
+    port: {{transport_tcp_port}}
+
+gateway:
+  recover_after_data_nodes: {{gateway_recover_after_data_nodes}}
+  recover_after_time: {{recover_after_time}}
+  expected_data_nodes: {{expected_data_nodes}}
+# https://www.elastic.co/guide/en/elasticsearch/guide/current/indexing-performance.html
+indices:
+  store.throttle.type: {{indices_memory_index_store_throttle_type}}
+  memory:
+   index_buffer_size: {{indices_memory_index_buffer_size}}
+  fielddata:
+   cache.size: {{indices_fielddata_cache_size}}
+
+bootstrap:
+  memory_lock: {{bootstrap_memory_lock}}
+  system_call_filter: false
+
+thread_pool:
+  bulk:
+    queue_size: {{threadpool_bulk_queue_size}}
+  index:
+    queue_size: {{threadpool_index_queue_size}}
+
+discovery.zen.ping_timeout: {{discovery_zen_ping_timeout}}
+discovery.zen.fd.ping_interval: {{discovery_zen_fd_ping_interval}}
+discovery.zen.fd.ping_timeout: {{discovery_zen_fd_ping_timeout}}
+discovery.zen.fd.ping_retries: {{discovery_zen_fd_ping_retries}}
+
+network.host: {{network_host}}
+network.publish_host: {{network_publish_host}}

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch.slave.yaml.j2
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch.slave.yaml.j2 b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch.slave.yaml.j2
new file mode 100755
index 0000000..6bf8399
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch.slave.yaml.j2
@@ -0,0 +1,78 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
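+{# Elasticsearch configuration for data-only (slave) nodes; node.master is false. #}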
+cluster:
+  name: {{cluster_name}}
+  routing:
+    allocation.node_concurrent_recoveries: {{cluster_routing_allocation_node_concurrent_recoveries}}
+    allocation.disk.watermark.low: {{cluster_routing_allocation_disk_watermark_low}}
+    allocation.disk.threshold_enabled: {{cluster_routing_allocation_disk_threshold_enabled}}
+    allocation.disk.watermark.high: {{cluster_routing_allocation_disk_watermark_high}}
+
+discovery:
+  zen:
+    ping:
+      unicast:
+        hosts: {{zen_discovery_ping_unicast_hosts}}
+
+node:
+  data: true
+  master: false
+  name: {{hostname}}
+path:
+  data: {{path_data}}
+
+http:
+  port: {{http_port}}
+  cors.enabled: {{http_cors_enabled}}
+
+
+transport:
+  tcp:
+    port: {{transport_tcp_port}}
+
+gateway:
+  recover_after_data_nodes: {{gateway_recover_after_data_nodes}}
+  recover_after_time: {{recover_after_time}}
+  expected_data_nodes: {{expected_data_nodes}}
+
+# https://www.elastic.co/guide/en/elasticsearch/guide/current/indexing-performance.html
+indices:
+  store.throttle.type: {{indices_memory_index_store_throttle_type}}
+  memory:
+   index_buffer_size: {{indices_memory_index_buffer_size}}
+  fielddata:
+   cache.size: {{indices_fielddata_cache_size}}
+
+bootstrap:
+  memory_lock: {{bootstrap_memory_lock}}
+  system_call_filter: false
+
+thread_pool:
+  bulk:
+    queue_size: {{threadpool_bulk_queue_size}}
+  index:
+    queue_size: {{threadpool_index_queue_size}}
+
+discovery.zen.ping_timeout: {{discovery_zen_ping_timeout}}
+discovery.zen.fd.ping_interval: {{discovery_zen_fd_ping_interval}}
+discovery.zen.fd.ping_timeout: {{discovery_zen_fd_ping_timeout}}
+discovery.zen.fd.ping_retries: {{discovery_zen_fd_ping_retries}}
+
+network.host: {{network_host}}
+network.publish_host: {{network_publish_host}}

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch_limits.conf.j2
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch_limits.conf.j2 b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch_limits.conf.j2
new file mode 100644
index 0000000..99f72e1
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch_limits.conf.j2
@@ -0,0 +1,20 @@
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
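+# process count, open file, and memory lock limits for the Elasticsearch service user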
+{{elastic_user}}	-	nproc  {{elastic_user_nproc_limit}}
+{{elastic_user}}	-	nofile {{elastic_user_nofile_limit}}
+{{elastic_user}}	soft	memlock	{{elastic_user_memlock_soft_limit}}
+{{elastic_user}}	hard	memlock	{{elastic_user_memlock_hard_limit}}

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/quicklinks/quicklinks.json b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/quicklinks/quicklinks.json
new file mode 100644
index 0000000..909828b
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/quicklinks/quicklinks.json
@@ -0,0 +1,43 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"HTTP_ONLY"
+    },
+
+    "links": [
+      {
+        "name": "es_health_link",
+        "label": "Elasticsearch Health",
+        "requires_user_name": "false",
+        "component_name": "ES_MASTER",
+        "url":"%@://%@:%@/_cat/health?v",
+        "port":{
+          "http_property": "http_port",
+          "http_default_port": "9200",
+          "https_property": "http_port",
+          "https_default_port": "9200",
+          "regex": "^(\\d+)",
+          "site": "elastic-site"
+        }
+      },
+      {
+        "name": "es_indices_link",
+        "label": "Elasticsearch Indexes",
+        "requires_user_name": "false",
+        "component_name": "ES_MASTER",
+        "url":"%@://%@:%@/_cat/indices?v",
+        "port":{
+          "http_property": "http_port",
+          "http_default_port": "9200",
+          "https_property": "http_port",
+          "https_default_port": "9200",
+          "regex": "^(\\d+)",
+          "site": "elastic-site"
+        }
+      }
+    ]
+  }
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/role_command_order.json
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/role_command_order.json b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/role_command_order.json
new file mode 100755
index 0000000..130d018
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/role_command_order.json
@@ -0,0 +1,8 @@
+{
+  "_comment" : "Record format:",
+  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+  "general_deps" : {
+    "_comment" : "dependencies for all cases",
+    "ELASTICSEARCH_SERVICE_CHECK-SERVICE_CHECK" : ["ES_MASTER-START", "ES_SLAVE-START"]
+  }
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/configuration/kibana-env.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/configuration/kibana-env.xml b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/configuration/kibana-env.xml
new file mode 100755
index 0000000..1246405
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/configuration/kibana-env.xml
@@ -0,0 +1,72 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>kibana_user</name>
+    <value>kibana</value>
+    <property-type>USER</property-type>
+    <description>Service User for Kibana</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>kibana_group</name>
+    <value>kibana</value>
+    <property-type>GROUP</property-type>
+    <description>Service Group for Kibana</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property require-input="true">
+    <name>kibana_server_host</name>
+    <value>0.0.0.0</value>
+    <description>Host name or IP address that Kibana should bind to.</description>
+  </property>
+  <property require-input="true">
+    <name>kibana_log_dir</name>
+    <value>/var/log/kibana</value>
+    <description>Log directory for Kibana</description>
+  </property>
+  <property require-input="true">
+    <name>kibana_pid_dir</name>
+    <value>/var/run/kibana</value>
+    <description>PID directory for Kibana</description>
+  </property>
+  <property require-input="true">
+    <name>kibana_es_url</name>
+    <value></value>
+    <description>The Elasticsearch instance to use for all your queries. (http://eshost:9200)</description>
+  </property>
+  <property require-input="true">
+    <name>kibana_server_port</name>
+    <value>5000</value>
+    <description>Kibana back end server port to use.</description>
+  </property>
+  <property require-input="true">
+    <name>kibana_default_application</name>
+    <value>default</value>
+    <description>The default application to load.</description>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/configuration/kibana-site.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/configuration/kibana-site.xml b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/configuration/kibana-site.xml
new file mode 100755
index 0000000..d8d0513
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/configuration/kibana-site.xml
@@ -0,0 +1,113 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+    <!-- kibana.yml -->
+    <property>
+        <name>content</name>
+        <display-name>kibana.yml template</display-name>
+        <description>This is the jinja template for kibana.yml file</description>
+        <value>
+# Kibana is served by a back end server. This controls which port to use.
+server.port: {{ kibana_port }}
+
+# The host to bind the server to.
+# Kibana (like Elasticsearch) now binds to localhost for security purposes instead of 0.0.0.0 (all addresses). Previous binding to 0.0.0.0 also caused issues for Windows users.
+server.host: {{ kibana_server_host }}
+
+# If you are running kibana behind a proxy, and want to mount it at a path,
+# specify that path here. The basePath can't end in a slash.
+# server.basePath: ""
+
+# The maximum payload size in bytes on incoming server requests.
+# server.maxPayloadBytes: 1048576
+
+# The Elasticsearch instance to use for all your queries.
+elasticsearch.url: {{ es_url }}
+
+# preserve_elasticsearch_host true will send the hostname specified in `elasticsearch`. If you set it to false,
+# then the host you use to connect to *this* Kibana instance will be sent.
+# elasticsearch.preserveHost: true
+
+# Kibana uses an index in Elasticsearch to store saved searches, visualizations
+# and dashboards. It will create a new index if it doesn't already exist.
+# kibana.index: ".kibana"
+
+# The default application to load.
+kibana.defaultAppId: "{{ kibana_default_application }}"
+
+# If your Elasticsearch is protected with basic auth, these are the user credentials
+# used by the Kibana server to perform maintenance on the kibana_index at startup. Your Kibana
+# users will still need to authenticate with Elasticsearch (which is proxied through
+# the Kibana server)
+# elasticsearch.username: "user"
+# elasticsearch.password: "pass"
+
+# SSL for outgoing requests from the Kibana Server to the browser (PEM formatted)
+# server.ssl.cert: /path/to/your/server.crt
+# server.ssl.key: /path/to/your/server.key
+
+# Optional setting to validate that your Elasticsearch backend uses the same key files (PEM formatted)
+# elasticsearch.ssl.cert: /path/to/your/client.crt
+# elasticsearch.ssl.key: /path/to/your/client.key
+
+# If you need to provide a CA certificate for your Elasticsearch instance, put
+# the path of the pem file here.
+# elasticsearch.ssl.ca: /path/to/your/CA.pem
+
+# Set to false to have a complete disregard for the validity of the SSL
+# certificate.
+# elasticsearch.ssl.verify: true
+
+# Time in milliseconds to wait for elasticsearch to respond to pings, defaults to
+# request_timeout setting
+# elasticsearch.pingTimeout: 1500
+
+# Time in milliseconds to wait for responses from the back end or elasticsearch.
+# This must be > 0
+# elasticsearch.requestTimeout: 30000
+
+# Time in milliseconds for Elasticsearch to wait for responses from shards.
+# Set to 0 to disable.
+# elasticsearch.shardTimeout: 0
+
+# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying
+# elasticsearch.startupTimeout: 5000
+
+# Set the path to where you would like the process id file to be created.
+# pid.file: /var/run/kibana.pid
+
+# If you would like to send the log output to a file you can set the path below.
+logging.dest: {{ log_dir }}/kibana.log
+
+# Set this to true to suppress all logging output.
+# logging.silent: false
+
+# Set this to true to suppress all logging output except for error messages.
+# logging.quiet: false
+
+# Set this to true to log all events, including system usage information and all requests.
+# logging.verbose: false
+        </value>
+        <value-attributes>
+            <type>content</type>
+        </value-attributes>
+    </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/metainfo.xml b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/metainfo.xml
new file mode 100755
index 0000000..b542c54
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/metainfo.xml
@@ -0,0 +1,84 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>KIBANA</name>
+            <displayName>Kibana</displayName>
+            <comment>Kibana Dashboard</comment>
+            <version>5.6.2</version>
+            <components>
+                <component>
+                    <name>KIBANA_MASTER</name>
+                    <displayName>Kibana Server</displayName>
+                    <category>MASTER</category>
+                    <cardinality>1</cardinality>
+                    <commandScript>
+                        <script>scripts/kibana_master.py</script>
+                        <scriptType>PYTHON</scriptType>
+                        <timeout>600</timeout>
+                    </commandScript>
+                </component>
+            </components>
+            <osSpecifics>
+                <osSpecific>
+                    <osFamily>redhat6</osFamily>
+                    <packages>
+                        <package>
+                            <name>python-elasticsearch</name>
+                        </package>
+                        <package>
+                            <name>kibana-5.6.2</name>
+                        </package>
+                    </packages>
+                </osSpecific>
+                <osSpecific>
+                    <osFamily>redhat7</osFamily>
+                    <packages>
+                        <package>
+                            <name>python-elasticsearch</name>
+                        </package>
+                        <package>
+                            <name>kibana-5.6.2</name>
+                        </package>
+                    </packages>
+                </osSpecific>
+                <osSpecific>
+                    <osFamily>ubuntu14</osFamily>
+                    <packages>
+                        <package>
+                            <name>kibana=5.6.2</name>
+                        </package>
+                    </packages>
+                </osSpecific>
+            </osSpecifics>
+            <configuration-dependencies>
+                <config-type>kibana-env</config-type>
+                <config-type>kibana-site</config-type>
+            </configuration-dependencies>
+            <restartRequiredAfterChange>true</restartRequiredAfterChange>
+            <quickLinksConfigurations>
+                <quickLinksConfiguration>
+                    <fileName>quicklinks.json</fileName>
+                    <default>true</default>
+                </quickLinksConfiguration>
+            </quickLinksConfigurations>
+        </service>
+    </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/common.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/common.py b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/common.py
new file mode 100644
index 0000000..37100cd
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/common.py
@@ -0,0 +1,56 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.get_user_call_output import get_user_call_output
+from resource_management.core.exceptions import ExecutionFailed
+from resource_management.core.exceptions import ComponentIsNotRunning
+
+def service_check(cmd, user, label):
+    """
+    Executes a service check command that adheres to LSB-compliant
+    return codes.  The return codes are interpreted as defined
+    by the LSB.
+
+    See http://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/iniscrptact.html
+    for more information.
+
+    :param cmd: The service check command to execute.
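+    :param user: The user to execute the command as.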
+    :param label: The name of the service.
+    """
+    Logger.info("Performing service check; cmd={0}, user={1}, label={2}".format(cmd, user, label))
+    rc, out, err = get_user_call_output(cmd, user, is_checked_call=False)
+
+    if err:
+        Logger.error(err)
+
+    if rc in [1, 2, 3]:
+        # LSB: return codes 1, 2, or 3 mean 'program is dead' or 'program is not running'
+        Logger.info("{0} is not running".format(label))
+        raise ComponentIsNotRunning()
+
+    elif rc == 0:
+        # LSB: return code 0 means 'program is running or service is OK'
+        Logger.info("{0} is running".format(label))
+
+    else:
+        # any other return code means the service state is unknown
+        err_msg = "{0} service check failed; cmd '{1}' returned {2}".format(label, cmd, rc)
+        Logger.error(err_msg)
+        raise ExecutionFailed(err_msg, rc, out, err)

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/kibana_master.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/kibana_master.py b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/kibana_master.py
new file mode 100755
index 0000000..c013ea3
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/kibana_master.py
@@ -0,0 +1,81 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Directory
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources.system import File
+from resource_management.core.source import InlineTemplate
+from resource_management.libraries.script import Script
+
+from common import service_check
+
+class Kibana(Script):
+
+    def install(self, env):
+        import params
+        env.set_params(params)
+        Logger.info("Installing Kibana")
+        self.install_packages(env)
+
+    def configure(self, env, upgrade_type=None, config_dir=None):
+        import params
+        env.set_params(params)
+        Logger.info("Configuring Kibana")
+
+        directories = [params.log_dir, params.pid_dir, params.conf_dir]
+        Directory(directories,
+                  mode=0755,
+                  owner=params.kibana_user,
+                  group=params.kibana_group
+                  )
+
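+        # render the kibana.yml template, taken from the 'content' property of kibana-site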
+        File("{0}/kibana.yml".format(params.conf_dir),
+             owner=params.kibana_user,
+             content=InlineTemplate(params.kibana_yml_template)
+             )
+
+    def stop(self, env, upgrade_type=None):
+        import params
+        env.set_params(params)
+        Logger.info("Stopping Kibana")
+        Execute("service kibana stop")
+
+    def start(self, env, upgrade_type=None):
+        import params
+        env.set_params(params)
+        self.configure(env)
+        Logger.info("Starting Kibana")
+        Execute("service kibana start")
+
+    def restart(self, env):
+        import params
+        env.set_params(params)
+        self.configure(env)
+        Logger.info("Restarting Kibana")
+        Execute("service kibana restart")
+
+    def status(self, env):
+        import params
+        env.set_params(params)
+        Logger.info("Checking the status of Kibana")
+        service_check("service kibana status", user=params.kibana_user, label="Kibana")
+
+if __name__ == "__main__":
+    Kibana().execute()

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/params.py b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/params.py
new file mode 100755
index 0000000..ef4cb62
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/params.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Kibana Params configurations
+
+"""
+
+from urlparse import urlparse
+
+from resource_management.libraries.functions import format
+from resource_management.libraries.script import Script
+
+# server configurations
+config = Script.get_config()
+
+kibana_home = '/usr/share/kibana/'
+kibana_bin = '/usr/share/kibana/bin/'
+
+conf_dir = "/etc/kibana"
+kibana_user = config['configurations']['kibana-env']['kibana_user']
+kibana_group = config['configurations']['kibana-env']['kibana_group']
+log_dir = config['configurations']['kibana-env']['kibana_log_dir']
+pid_dir = config['configurations']['kibana-env']['kibana_pid_dir']
+pid_file = format("{pid_dir}/kibanasearch.pid")
+es_url = config['configurations']['kibana-env']['kibana_es_url']
+parsed = urlparse(es_url)
+es_host = parsed.hostname
+es_port = parsed.port
+kibana_port = config['configurations']['kibana-env']['kibana_server_port']
+kibana_server_host = config['configurations']['kibana-env']['kibana_server_host']
+kibana_default_application = config['configurations']['kibana-env']['kibana_default_application']
+hostname = config['hostname']
+java64_home = config['hostLevelParams']['java_home']
+kibana_yml_template = config['configurations']['kibana-site']['content']
+

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/quicklinks/quicklinks.json b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/quicklinks/quicklinks.json
new file mode 100755
index 0000000..448e102
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/common-services/KIBANA/5.6.2/quicklinks/quicklinks.json
@@ -0,0 +1,28 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"HTTP_ONLY"
+    },
+
+    "links": [
+      {
+        "name": "metron_ui",
+        "label": "Metron UI",
+        "requires_user_name": "false",
+        "component_name": "KIBANA_MASTER",
+        "url":"%@://%@:%@/",
+        "port":{
+          "http_property": "kibana_server_port",
+          "http_default_port": "5601",
+          "https_property": "kibana_server_port",
+          "https_default_port": "5601",
+          "regex": "^(\\d+)$",
+          "site": "kibana-env"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/mpack.json
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/mpack.json b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/mpack.json
new file mode 100644
index 0000000..1b5400c
--- /dev/null
+++ b/metron-deployment/packaging/ambari/elasticsearch-mpack/src/main/resources/mpack.json
@@ -0,0 +1,76 @@
+{
+  "type": "full-release",
+  "name": "elasticsearch-ambari.mpack",
+  "version": "5.6.2",
+  "description": "Ambari Management Pack for Elasticsearch and Kibana",
+  "prerequisites": {
+    "min-ambari-version": "2.4.0.0",
+    "min-stack-versions": [
+      {
+        "stack_name": "HDP",
+        "stack_version": "2.3.0"
+      }
+    ]
+  },
+  "artifacts": [
+    {
+      "name": "ELASTICSEARCH-common-services",
+      "type" : "service-definitions",
+      "source_dir" : "common-services"
+    },
+    {
+      "name" : "ELASTICSEARCH-addon-services",
+      "type" : "stack-addon-service-definitions",
+      "source_dir": "addon-services",
+      "service_versions_map": [
+        {
+          "service_name" : "KIBANA",
+          "service_version" : "5.6.2",
+          "applicable_stacks" : [
+            {
+              "stack_name" : "HDP",
+              "stack_version" : "2.3"
+            },
+            {
+              "stack_name" : "HDP",
+              "stack_version" : "2.4"
+            },
+            {
+              "stack_name" : "HDP",
+              "stack_version" : "2.5"
+            },
+            {
+              "stack_name" : "HDP",
+              "stack_version" : "2.6"
+            }
+          ]
+        },
+        {
+          "service_name" : "ELASTICSEARCH",
+          "service_version" : "5.6.2",
+          "applicable_stacks" : [
+            {
+              "stack_name" : "HDP",
+              "stack_version" : "2.3"
+            },
+            {
+              "stack_name" : "HDP",
+              "stack_version" : "2.4"
+            },
+            {
+              "stack_name" : "HDP",
+              "stack_version" : "2.5"
+            },
+            {
+              "stack_name" : "HDP",
+              "stack_version" : "2.6"
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/README.md
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/README.md b/metron-deployment/packaging/ambari/metron-mpack/README.md
index 5179b5f..cd9399d 100644
--- a/metron-deployment/packaging/ambari/metron-mpack/README.md
+++ b/metron-deployment/packaging/ambari/metron-mpack/README.md
@@ -86,15 +86,11 @@ Storm (and the Metron topologies) must be restarted after Metron is installed on
 
 Kerberizing a cluster with a pre-existing Metron, automatically restarts all services during Kerberization.  No additional manual restart is needed in this case.
 
-#### Zeppelin Import
-
-A custom action is available in Ambari to import Zeppelin dashboards. See the [metron-indexing documentation](../../../../metron-platform/metron-indexing) for more information.
-
 #### Kibana Dashboards
 
 The dashboards installed by the Kibana custom action are managed by two JSON files:
-* metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/dashboard/kibana.template
-* metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/dashboard/dashboard-bulkload.json
+* metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/dashboard/kibana.template
+* metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/dashboard/dashboard-bulkload.json
 
 The first file, `kibana.template`, is an Elasticsearch template that specifies the proper mapping types for the Kibana index. This configuration is necessary due to a bug
 in Elasticsearch's default dynamic mappings, which handle long versus integer types in a way that is incompatible with Kibana \[1\]. The second file, `dashboard-bulkload.json`,
@@ -102,7 +98,7 @@ contains all of the dashboard metadata necessary to create the Metron dashboard.
 of documents necessary for setting up the dashboard in Elasticsearch. The main features installed are index patterns, searches, and a variety of visualizations
 that are used in the Metron dashboard.
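 
 For reference, each entry in `dashboard-bulkload.json` follows the standard Elasticsearch bulk format: an action line followed, on the next line, by the document source. A hypothetical entry might look like:
 ```
 { "create": { "_index": ".kibana", "_type": "search", "_id": "example-search" } }
 { "title": "Example Search", "columns": [ "_source" ], "sort": [ "timestamp", "desc" ] }
 ```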
 
-Deploying the existing dashboard is easy. Once the MPack is installed, run the Kibana service's action "Load Template" to install dashboards.  This will no longer overwrite
+Deploying the existing dashboard is easy. Once the MPack is installed, run the Metron service's action "Load Template" to install dashboards.  This will no longer overwrite
 the .kibana index in Elasticsearch. The bulk load is configured to fail inserts for existing documents. If you want to _completely_ reload the dashboard, you would need to delete
 the .kibana index and run the load again from Ambari.
 
@@ -115,7 +111,7 @@ You can modify dashboards in Kibana and bring those changes into the core MPack
 
 1. Export the .kibana index from ES
 2. Convert the data into the ES bulk load format
-3. Replace the dashboard-bulkload.json file in the Kibana MPack.
+3. Replace the dashboard-bulkload.json file in the Metron MPack.
 
 You can export the .kibana index using a tool like [https://github.com/taskrabbit/elasticsearch-dump](https://github.com/taskrabbit/elasticsearch-dump). The important
 feature is to have one document per line. Here's an example export using elasticsearch-dump
@@ -166,14 +162,14 @@ To create a new version of the file, make any necessary changes to Kibana (e.g.
 
 **Saving a Backup**
 ```
-python packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/dashboard/dashboardindex.py \
+python packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/dashboard/dashboardindex.py \
 $ES_HOST 9200 \
 ~/dashboard.p -s
 ```
 
 **Restoring From a Backup**
 ```
-python packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/dashboard/dashboardindex.py \
+python packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/dashboard/dashboardindex.py \
 $ES_HOST 9200 \
 ~/dashboard.p
 ```
@@ -181,6 +177,10 @@ $ES_HOST 9200 \
 **Note**: This method of writing the Kibana dashboard to Elasticsearch will overwrite the entire .kibana index. Be sure to back up the index first, using either the new JSON
 method described above or the old method described here (writing out the dashboard.p pickle file by passing the -s option to dashboardindex.py).
 
+#### Zeppelin Import
+
+A custom action is available in Ambari to import Zeppelin dashboards. See the [metron-indexing documentation](../../../../metron-platform/metron-indexing) for more information.
+
 #### Offline Installation
 
 Retrieval of the GeoIP database is the only point during installation that reaches out to the internet. For an offline installation, the URL for the GeoIP database can be manually set to a local path on the file system such as  `file:///home/root/geoip/GeoLite2-City.mmdb.gz`.

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/assemblies/metron-mpack.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/assemblies/metron-mpack.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/assemblies/metron-mpack.xml
index ec4272e..81087a2 100644
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/assemblies/metron-mpack.xml
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/assemblies/metron-mpack.xml
@@ -30,24 +30,10 @@
             <filtered>true</filtered>
         </fileSet>
         <fileSet>
-            <directory>src/main/resources/common-services</directory>
-            <outputDirectory>common-services</outputDirectory>
-            <excludes>
-                <exclude>**/CURRENT/**</exclude>
-            </excludes>
-        </fileSet>
-        <fileSet>
             <directory>src/main/resources/addon-services/METRON/CURRENT</directory>
             <outputDirectory>addon-services/METRON/${metron.version}</outputDirectory>
             <filtered>true</filtered>
         </fileSet>
-        <fileSet>
-            <directory>src/main/resources/addon-services</directory>
-            <outputDirectory>addon-services</outputDirectory>
-            <excludes>
-                <exclude>**/CURRENT/**</exclude>
-            </excludes>
-        </fileSet>
     </fileSets>
     <files>
         <file>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/ELASTICSEARCH/5.6.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/ELASTICSEARCH/5.6.2/metainfo.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/ELASTICSEARCH/5.6.2/metainfo.xml
deleted file mode 100755
index accf7da..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/ELASTICSEARCH/5.6.2/metainfo.xml
+++ /dev/null
@@ -1,29 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<metainfo>
-    <schemaVersion>2.0</schemaVersion>
-    <services>
-        <service>
-            <name>ELASTICSEARCH</name>
-            <version>5.6.2</version>
-            <extends>common-services/ELASTICSEARCH/5.6.2</extends>
-        </service>
-    </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/ELASTICSEARCH/5.6.2/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/ELASTICSEARCH/5.6.2/repos/repoinfo.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/ELASTICSEARCH/5.6.2/repos/repoinfo.xml
deleted file mode 100644
index ba21fb1..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/ELASTICSEARCH/5.6.2/repos/repoinfo.xml
+++ /dev/null
@@ -1,45 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-       http://www.apache.org/licenses/LICENSE-2.0
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-    <os family="redhat6">
-        <repo>
-            <baseurl>https://artifacts.elastic.co/packages/5.x/yum</baseurl>
-            <repoid>elasticsearch-5.x</repoid>
-            <reponame>ELASTICSEARCH</reponame>
-        </repo>
-    </os>
-    <os family="redhat7">
-        <repo>
-            <baseurl>https://artifacts.elastic.co/packages/5.x/yum</baseurl>
-            <repoid>elasticsearch-5.x</repoid>
-            <reponame>ELASTICSEARCH</reponame>
-        </repo>
-    </os>
-    <os family="ubuntu14">
-        <!--
-            see details about Ambari fixes for Ubuntu introduced in Ambari 2.6+
-                https://github.com/apache/ambari/commit/f8b29df9685b443d4a5c06c6e1725e4428c95b49#diff-6f26c26ed59462200d018c5e1e71e773
-                https://issues.apache.org/jira/browse/AMBARI-21856
-        -->
-        <repo>
-            <baseurl>https://artifacts.elastic.co/packages/5.x/apt</baseurl>
-            <repoid>elasticsearch-5.x</repoid>
-            <reponame>ELASTICSEARCH</reponame>
-            <distribution>stable</distribution>
-        </repo>
-    </os>
-</reposinfo>
-

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/KIBANA/5.6.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/KIBANA/5.6.2/metainfo.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/KIBANA/5.6.2/metainfo.xml
deleted file mode 100755
index 8a4fba2..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/KIBANA/5.6.2/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<metainfo>
-    <schemaVersion>2.0</schemaVersion>
-    <services>
-        <service>
-            <name>KIBANA</name>
-            <version>5.6.2</version>
-            <extends>common-services/KIBANA/5.6.2</extends>
-        </service>
-    </services>
-</metainfo>
-

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/KIBANA/5.6.2/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/KIBANA/5.6.2/quicklinks/quicklinks.json b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/KIBANA/5.6.2/quicklinks/quicklinks.json
deleted file mode 100755
index 622a512..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/KIBANA/5.6.2/quicklinks/quicklinks.json
+++ /dev/null
@@ -1,27 +0,0 @@
-{
-  "name": "default",
-  "description": "default quick links configuration",
-  "configuration": {
-    "protocol":
-    {
-      "type":"HTTP_ONLY"
-    },
-
-    "links": [
-      {
-        "name": "kibana_master_ui",
-        "label": "Metron Dashboard",
-        "requires_user_name": "false",
-        "url":"%@://%@:%@/",
-        "port":{
-          "http_property": "kibana_server_port",
-          "http_default_port": "5601",
-          "https_property": "kibana_server_port",
-          "https_default_port": "5601",
-          "regex": "^(\\d+)$",
-          "site": "kibana-env"
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/KIBANA/5.6.2/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/KIBANA/5.6.2/repos/repoinfo.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/KIBANA/5.6.2/repos/repoinfo.xml
deleted file mode 100644
index 2755818..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/addon-services/KIBANA/5.6.2/repos/repoinfo.xml
+++ /dev/null
@@ -1,60 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-       http://www.apache.org/licenses/LICENSE-2.0
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-    <os family="redhat6">
-        <repo>
-            <baseurl>https://artifacts.elastic.co/packages/5.x/yum</baseurl>
-            <repoid>kibana-5.x</repoid>
-            <reponame>KIBANA</reponame>
-        </repo>
-        <repo>
-            <baseurl>http://packages.elastic.co/curator/5/centos/6</baseurl>
-            <repoid>ES-Curator-5.x</repoid>
-            <reponame>CURATOR</reponame>
-        </repo>
-    </os>
-    <os family="redhat7">
-        <repo>
-            <baseurl>https://artifacts.elastic.co/packages/5.x/yum</baseurl>
-            <repoid>kibana-5.x</repoid>
-            <reponame>KIBANA</reponame>
-        </repo>
-        <repo>
-            <baseurl>http://packages.elastic.co/curator/5/centos/7</baseurl>
-            <repoid>ES-Curator-5.x</repoid>
-            <reponame>CURATOR</reponame>
-        </repo>
-    </os>
-    <os family="ubuntu14">
-        <!--
-            see details about Ambari fixes for Ubuntu introduced in Ambari 2.6+
-                https://github.com/apache/ambari/commit/f8b29df9685b443d4a5c06c6e1725e4428c95b49#diff-6f26c26ed59462200d018c5e1e71e773
-                https://issues.apache.org/jira/browse/AMBARI-21856
-        -->
-        <repo>
-            <baseurl>https://artifacts.elastic.co/packages/5.x/apt</baseurl>
-            <repoid>kibana-5.x</repoid>
-            <reponame>KIBANA</reponame>
-            <distribution>stable</distribution>
-        </repo>
-        <repo>
-            <baseurl>https://packages.elastic.co/curator/5/debian</baseurl>
-            <repoid>ES-Curator-5.x</repoid>
-            <reponame>CURATOR</reponame>
-            <distribution>stable</distribution>
-        </repo>
-    </os>
-</reposinfo>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-env.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-env.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-env.xml
deleted file mode 100755
index 9e4f8ad..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-env.xml
+++ /dev/null
@@ -1,86 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<configuration>
-  <property>
-    <name>elastic_user</name>
-    <value>elasticsearch</value>
-    <property-type>USER</property-type>
-    <description>Service user for Elasticsearch</description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-  </property>
-  <property>
-    <name>elastic_group</name>
-    <value>elasticsearch</value>
-    <property-type>GROUP</property-type>
-    <description>Service group for Elasticsearch</description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-  </property>
-  <property>
-    <name>elastic_log_dir</name>
-    <value>/var/log/elasticsearch</value>
-    <description>Log directory for elastic</description>
-  </property>
-  <property>
-    <name>elastic_pid_dir</name>
-    <value>/var/run/elasticsearch</value>
-    <description>The directory for pid files</description>
-  </property>
-  <!-- elasticsearch-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for elastic-env.sh file</description>
-    <value>
-#!/bin/bash
-
-# Set ELASTICSEARCH specific environment variables here.
-
-# The java implementation to use.
-export JAVA_HOME={{java64_home}}
-export PATH=$PATH:$JAVA_HOME/bin
-    </value>
-  </property>
-  <property>
-    <name>elastic_user_nofile_limit</name>
-    <value>65536</value>
-    <description>Max open file limit for Elasticsearch user.</description>
-  </property>
-  <property>
-    <name>elastic_user_nproc_limit</name>
-    <value>2048</value>
-    <description>Max number of processes for Elasticsearch user.</description>
-  </property>
-  <property>
-    <name>elastic_user_memlock_soft_limit</name>
-    <value>unlimited</value>
-    <description>Max locked-in memory address space (soft memlock limit).</description>
-  </property>
-  <property>
-    <name>elastic_user_memlock_hard_limit</name>
-    <value>unlimited</value>
-    <description>Max locked-in memory address space (hard memlock limit).</description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-jvm-options.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-jvm-options.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-jvm-options.xml
deleted file mode 100644
index 5c6aaca..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-jvm-options.xml
+++ /dev/null
@@ -1,144 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<configuration>
-    <property>
-        <name>heap_size</name>
-        <value>512m</value>
-        <description>JVM heap size</description>
-    </property>
-    <property>
-        <name>content</name>
-        <description>The jinja template for the Elasticsearch JVM options file.</description>
-        <value>
-## JVM configuration
-
-################################################################
-## IMPORTANT: JVM heap size
-################################################################
-##
-## You should always set the min and max JVM heap
-## size to the same value. For example, to set
-## the heap to 4 GB, set:
-##
-## -Xms4g
-## -Xmx4g
-##
-## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html
-## for more information
-##
-################################################################
-
-# Xms represents the initial size of total heap space
-# Xmx represents the maximum size of total heap space
-
--Xms{{heap_size}}
--Xmx{{heap_size}}
-
-################################################################
-## Expert settings
-################################################################
-##
-## All settings below this section are considered
-## expert settings. Don't tamper with them unless
-## you understand what you are doing
-##
-################################################################
-
-## GC configuration
--XX:+UseConcMarkSweepGC
--XX:CMSInitiatingOccupancyFraction=75
--XX:+UseCMSInitiatingOccupancyOnly
-
-## optimizations
-
-# pre-touch memory pages used by the JVM during initialization
--XX:+AlwaysPreTouch
-
-## basic
-
-# force the server VM (remove on 32-bit client JVMs)
--server
-
-# explicitly set the stack size (reduce to 320k on 32-bit client JVMs)
--Xss1m
-
-# set to headless, just in case
--Djava.awt.headless=true
-
-# ensure UTF-8 encoding by default (e.g. filenames)
--Dfile.encoding=UTF-8
-
-# use our provided JNA always versus the system one
--Djna.nosys=true
-
-# use old-style file permissions on JDK9
--Djdk.io.permissionsUseCanonicalPath=true
-
-# flags to configure Netty
--Dio.netty.noUnsafe=true
--Dio.netty.noKeySetOptimization=true
--Dio.netty.recycler.maxCapacityPerThread=0
-
-# log4j 2
--Dlog4j.shutdownHookEnabled=false
--Dlog4j2.disable.jmx=true
--Dlog4j.skipJansi=true
-
-## heap dumps
-
-# generate a heap dump when an allocation from the Java heap fails
-# heap dumps are created in the working directory of the JVM
--XX:+HeapDumpOnOutOfMemoryError
-
-# specify an alternative path for heap dumps
-# ensure the directory exists and has sufficient space
-#-XX:HeapDumpPath=${heap.dump.path}
-
-## GC logging
-
-#-XX:+PrintGCDetails
-#-XX:+PrintGCTimeStamps
-#-XX:+PrintGCDateStamps
-#-XX:+PrintClassHistogram
-#-XX:+PrintTenuringDistribution
-#-XX:+PrintGCApplicationStoppedTime
-
-# log GC status to a file with time stamps
-# ensure the directory exists
-#-Xloggc:${loggc}
-
-# By default, the GC log file will not rotate.
-# By uncommenting the lines below, the GC log file
-# will be rotated every 128MB at most 32 times.
-#-XX:+UseGCLogFileRotation
-#-XX:NumberOfGCLogFiles=32
-#-XX:GCLogFileSize=128M
-
-# Elasticsearch 5.0.0 will throw an exception on unquoted field names in JSON.
-# If documents were already indexed with unquoted fields in a previous version
-# of Elasticsearch, some operations may throw errors.
-#
-# WARNING: This option will be removed in Elasticsearch 6.0.0 and is provided
-# only for migration purposes.
-#-Delasticsearch.json.allow_unquoted_field_names=true
-        </value>
-    </property>
-</configuration>


[15/50] [abbrv] metron git commit: METRON-1500 Enhance 'prepare-commit' to Support Feature Branches (nickwallen) closes apache/metron#971

Posted by rm...@apache.org.
METRON-1500 Enhance 'prepare-commit' to Support Feature Branches (nickwallen) closes apache/metron#971


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/9e95d4b6
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/9e95d4b6
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/9e95d4b6

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 9e95d4b61410c8033f0a4ea51e831566d5d933d3
Parents: 5ed9631
Author: nickwallen <ni...@nickallen.org>
Authored: Fri Mar 23 12:23:34 2018 -0400
Committer: nickallen <ni...@apache.org>
Committed: Fri Mar 23 12:23:34 2018 -0400

----------------------------------------------------------------------
 dev-utilities/committer-utils/prepare-commit | 27 ++++++++++++++++++-----
 1 file changed, 22 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
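
For illustration, the base-branch lookup that the change below performs with `curl` and `python` could be sketched in Java as follows. This is a hedged sketch only: the class and method names are hypothetical, and Jackson is assumed for the JSON parsing.

```
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class BaseBranchLookup {

  /**
   * Fetches a pull request from the GitHub API and returns the branch it
   * was opened against ("base.ref"), which may be a feature branch.
   */
  public static String baseBranchOf(String repo, int pr) throws Exception {
    URL url = new URL("https://api.github.com/repos/apache/" + repo + "/pulls/" + pr);
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (InputStream in = conn.getInputStream()) {
      JsonNode pull = new ObjectMapper().readTree(in);
      // fall back to master when the field is absent, mirroring the old default
      return pull.at("/base/ref").asText("master");
    } finally {
      conn.disconnect();
    }
  }
}
```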


http://git-wip-us.apache.org/repos/asf/metron/blob/9e95d4b6/dev-utilities/committer-utils/prepare-commit
----------------------------------------------------------------------
diff --git a/dev-utilities/committer-utils/prepare-commit b/dev-utilities/committer-utils/prepare-commit
index 67116cb..ae8d7ab 100755
--- a/dev-utilities/committer-utils/prepare-commit
+++ b/dev-utilities/committer-utils/prepare-commit
@@ -19,9 +19,9 @@
 # not likely to change
 METRON_UPSTREAM="https://git-wip-us.apache.org/repos/asf/metron.git"
 BRO_PLUGIN_UPSTREAM="https://git-wip-us.apache.org/repos/asf/metron-bro-plugin-kafka.git"
-BASE_BRANCH=master
 CONFIG_FILE=~/.metron-prepare-commit
 GITHUB_REMOTE="origin"
+BASE_BRANCH=master
 
 # does a config file already exist?
 if [ -f $CONFIG_FILE ]; then
@@ -114,6 +114,11 @@ if [ ! -d "$WORK" ]; then
   read -p "  origin repo [$ORIGIN]: " INPUT
   [ -n "$INPUT" ] && ORIGIN=$INPUT
 
+  # what branch did the PR get submitted against?  could be a feature branch
+  BASE_BRANCH=`curl -s https://api.github.com/repos/apache/${CHOSEN_REPO}/pulls/$PR | python -c 'import sys, json; print json.load(sys.stdin)["base"]["ref"]'`
+  read -p "  base branch to merge into [$BASE_BRANCH]: " INPUT
+  [ -n "$INPUT" ] && BASE_BRANCH=$INPUT
+
   # clone the repository and fetch updates
   mkdir -p $WORK
   git clone $ORIGIN $WORK
@@ -125,11 +130,23 @@ if [ ! -d "$WORK" ]; then
 
   # fetch any changes from upstream
   git remote add upstream $UPSTREAM
-  git fetch upstream $BASE_BRANCH
+  if git fetch upstream "$BASE_BRANCH"; then
+
+    if [ $BASE_BRANCH = "master" ]; then
+      # merge any changes from upstream
+      git checkout $BASE_BRANCH
+      git merge upstream/$BASE_BRANCH
 
-  # merge any changes from upstream
-  git checkout $BASE_BRANCH
-  git merge upstream/$BASE_BRANCH
+    else
+      # create a local branch from the remote feature branch
+      git checkout -B $BASE_BRANCH upstream/$BASE_BRANCH
+
+    fi
+
+  else
+    # unable to fetch the base branch
+    exit $?
+  fi
 
 else
 


[31/50] [abbrv] metron git commit: METRON-1494 Profiler Emits Messages to Kafka When Not Needed (nickwallen) closes apache/metron#967

Posted by rm...@apache.org.
METRON-1494 Profiler Emits Messages to Kafka When Not Needed (nickwallen) closes apache/metron#967


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/62d1a1bf
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/62d1a1bf
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/62d1a1bf

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 62d1a1bf7e8b9b3ee2f260c358719ea5080c9045
Parents: 438893b
Author: nickwallen <ni...@nickallen.org>
Authored: Wed Apr 11 17:57:09 2018 -0400
Committer: nickallen <ni...@apache.org>
Committed: Wed Apr 11 17:57:09 2018 -0400

----------------------------------------------------------------------
 .../metron/profiler/DefaultProfileBuilder.java  |   5 +
 .../bolt/FixedFrequencyFlushSignal.java         |  13 +-
 .../metron/profiler/bolt/HBaseEmitter.java      |  12 +-
 .../metron/profiler/bolt/KafkaEmitter.java      |  78 +++++--
 .../profiler/bolt/ProfileSplitterBolt.java      |   5 +
 .../metron/profiler/bolt/HBaseEmitterTest.java  | 120 +++++++++++
 .../metron/profiler/bolt/KafkaEmitterTest.java  | 201 +++++++++++++------
 7 files changed, 358 insertions(+), 76 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/62d1a1bf/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/DefaultProfileBuilder.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/DefaultProfileBuilder.java b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/DefaultProfileBuilder.java
index 4b564c9..66034ac 100644
--- a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/DefaultProfileBuilder.java
+++ b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/DefaultProfileBuilder.java
@@ -124,8 +124,13 @@ public class DefaultProfileBuilder implements ProfileBuilder, Serializable {
    */
   @Override
   public void apply(JSONObject message, long timestamp) {
+    LOG.debug("Applying message to profile; profile={}, entity={}, timestamp={}",
+            profileName, entity, timestamp);
+
     try {
       if (!isInitialized()) {
+        LOG.debug("Initializing profile; profile={}, entity={}, timestamp={}",
+                profileName, entity, timestamp);
 
         // execute each 'init' expression
         assign(definition.getInit(), message, "init");

http://git-wip-us.apache.org/repos/asf/metron/blob/62d1a1bf/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/FixedFrequencyFlushSignal.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/FixedFrequencyFlushSignal.java b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/FixedFrequencyFlushSignal.java
index b9f57dd..8c0a0b1 100644
--- a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/FixedFrequencyFlushSignal.java
+++ b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/FixedFrequencyFlushSignal.java
@@ -94,7 +94,8 @@ public class FixedFrequencyFlushSignal implements FlushSignal {
 
       // set the next time to flush
       flushTime = currentTime + flushFrequency;
-      LOG.debug("Setting flush time; flushTime={}, currentTime={}, flushFreq={}",
+      LOG.debug("Setting flush time; '{}' ms until flush; flushTime={}, currentTime={}, flushFreq={}",
+              timeToNextFlush(),
               flushTime,
               currentTime,
               flushFrequency);
@@ -112,7 +113,7 @@ public class FixedFrequencyFlushSignal implements FlushSignal {
     boolean flush = currentTime > flushTime;
     LOG.debug("Flush={}, '{}' ms until flush; currentTime={}, flushTime={}",
             flush,
-            flush ? 0 : (flushTime-currentTime),
+            timeToNextFlush(),
             currentTime,
             flushTime);
 
@@ -123,4 +124,12 @@ public class FixedFrequencyFlushSignal implements FlushSignal {
   public long currentTimeMillis() {
     return currentTime;
   }
+
+  /**
+   * Returns the number of milliseconds to the next flush.
+   * @return The time left until the next flush.
+   */
+  private long timeToNextFlush() {
+    return Math.max(0, flushTime - currentTime);
+  }
 }
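
The `timeToNextFlush()` helper above clamps the countdown at zero so the debug logging never reports a negative time once the flush deadline has passed. A minimal, stand-alone sketch of the same arithmetic, with field names assumed from the surrounding class:

```
public class FlushCountdown {

  private long flushTime;    // next scheduled flush, in epoch milliseconds
  private long currentTime;  // the latest event time observed

  public FlushCountdown(long flushTime, long currentTime) {
    this.flushTime = flushTime;
    this.currentTime = currentTime;
  }

  // never negative: once currentTime passes flushTime the countdown is simply 0
  public long timeToNextFlush() {
    return Math.max(0, flushTime - currentTime);
  }

  public static void main(String[] args) {
    System.out.println(new FlushCountdown(1000, 400).timeToNextFlush());  // 600
    System.out.println(new FlushCountdown(1000, 1500).timeToNextFlush()); // 0
  }
}
```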

http://git-wip-us.apache.org/repos/asf/metron/blob/62d1a1bf/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/HBaseEmitter.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/HBaseEmitter.java b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/HBaseEmitter.java
index 8e1229a..e4e3552 100644
--- a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/HBaseEmitter.java
+++ b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/HBaseEmitter.java
@@ -40,7 +40,7 @@ public class HBaseEmitter implements ProfileMeasurementEmitter, Serializable {
   /**
    * The stream identifier used for this destination.
    */
-  private  String streamId = "hbase";
+  private String streamId = "hbase";
 
   @Override
   public void declareOutputFields(OutputFieldsDeclarer declarer) {
@@ -49,7 +49,17 @@ public class HBaseEmitter implements ProfileMeasurementEmitter, Serializable {
 
   @Override
   public void emit(ProfileMeasurement measurement, OutputCollector collector) {
+
+    // measurements are always emitted to hbase
     collector.emit(getStreamId(), new Values(measurement));
+
+    LOG.debug("Emitted measurement; stream={}, profile={}, entity={}, period={}, start={}, end={}",
+            getStreamId(),
+            measurement.getProfileName(),
+            measurement.getEntity(),
+            measurement.getPeriod().getPeriod(),
+            measurement.getPeriod().getStartTimeMillis(),
+            measurement.getPeriod().getEndTimeMillis());
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/metron/blob/62d1a1bf/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/KafkaEmitter.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/KafkaEmitter.java b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/KafkaEmitter.java
index 29d1a49..87920da 100644
--- a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/KafkaEmitter.java
+++ b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/KafkaEmitter.java
@@ -19,8 +19,7 @@
 
 package org.apache.metron.profiler.bolt;
 
-import java.io.Serializable;
-import java.lang.invoke.MethodHandles;
+import org.apache.commons.collections4.MapUtils;
 import org.apache.commons.lang3.ClassUtils;
 import org.apache.metron.profiler.ProfileMeasurement;
 import org.apache.storm.task.OutputCollector;
@@ -31,6 +30,10 @@ import org.json.simple.JSONObject;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.Serializable;
+import java.lang.invoke.MethodHandles;
+import java.util.Map;
+
 /**
  * Responsible for emitting a {@link ProfileMeasurement} to an output stream that will
  * persist data to Kafka.
@@ -58,19 +61,48 @@ public class KafkaEmitter implements ProfileMeasurementEmitter, Serializable {
   @Override
   public void emit(ProfileMeasurement measurement, OutputCollector collector) {
 
-    JSONObject message = new JSONObject();
-    message.put("profile", measurement.getDefinition().getProfile());
-    message.put("entity", measurement.getEntity());
-    message.put("period", measurement.getPeriod().getPeriod());
-    message.put("period.start", measurement.getPeriod().getStartTimeMillis());
-    message.put("period.end", measurement.getPeriod().getEndTimeMillis());
-    message.put("timestamp", System.currentTimeMillis());
-    message.put("source.type", sourceType);
-    message.put("is_alert", "true");
+    // only need to emit if there are triage values
+    Map<String, Object> triageValues = measurement.getTriageValues();
+    if(MapUtils.isNotEmpty(triageValues)) {
+
+      JSONObject message = createMessage(measurement);
+      appendTriageValues(measurement, message);
+      collector.emit(getStreamId(), new Values(message));
+
+      LOG.debug("Emitted measurement; stream={}, profile={}, entity={}, period={}, start={}, end={}",
+              getStreamId(),
+              measurement.getProfileName(),
+              measurement.getEntity(),
+              measurement.getPeriod().getPeriod(),
+              measurement.getPeriod().getStartTimeMillis(),
+              measurement.getPeriod().getEndTimeMillis());
+
+    } else {
+
+      LOG.debug("No triage values, nothing to emit; stream={}, profile={}, entity={}, period={}, start={}, end={}",
+              getStreamId(),
+              measurement.getProfileName(),
+              measurement.getEntity(),
+              measurement.getPeriod().getPeriod(),
+              measurement.getPeriod().getStartTimeMillis(),
+              measurement.getPeriod().getEndTimeMillis());
+    }
+  }
 
-    // append each of the triage values to the message
-    measurement.getTriageValues().forEach((key, value) -> {
+  /**
+   * Appends triage values obtained from a {@code ProfileMeasurement} to the
+   * outgoing message.
+   *
+   * @param measurement The measurement that may contain triage values.
+   * @param message The message that the triage values are appended to.
+   */
+  private void appendTriageValues(ProfileMeasurement measurement, JSONObject message) {
+
+    // for each triage value...
+    Map<String, Object> triageValues = MapUtils.emptyIfNull(measurement.getTriageValues());
+    triageValues.forEach((key, value) -> {
 
+      // append the triage value to the message
       if(isValidType(value)) {
         message.put(key, value);
 
@@ -83,8 +115,26 @@ public class KafkaEmitter implements ProfileMeasurementEmitter, Serializable {
                 key));
       }
     });
+  }
+
+  /**
+   * Creates a message that will be emitted to Kafka.
+   *
+   * @param measurement The profile measurement used as a basis for the message.
+   * @return A message that can be emitted to Kafka.
+   */
+  private JSONObject createMessage(ProfileMeasurement measurement) {
 
-    collector.emit(getStreamId(), new Values(message));
+    JSONObject message = new JSONObject();
+    message.put("profile", measurement.getDefinition().getProfile());
+    message.put("entity", measurement.getEntity());
+    message.put("period", measurement.getPeriod().getPeriod());
+    message.put("period.start", measurement.getPeriod().getStartTimeMillis());
+    message.put("period.end", measurement.getPeriod().getEndTimeMillis());
+    message.put("timestamp", System.currentTimeMillis());
+    message.put("source.type", sourceType);
+    message.put("is_alert", "true");
+    return message;
   }
 
   /**
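
The guard at the top of `emit()` above is what keeps the profiler from writing to Kafka when a measurement carries no triage values. A hedged, stand-alone sketch of that null-and-empty-safe check follows; the helper name is illustrative, not part of the actual class:

```
import org.apache.commons.collections4.MapUtils;

import java.util.Collections;
import java.util.Map;

public class TriageGuard {

  // Mirrors the emit() guard: only emit when at least one triage value exists.
  public static boolean shouldEmit(Map<String, Object> triageValues) {
    return MapUtils.isNotEmpty(triageValues);  // false for null or empty maps
  }

  public static void main(String[] args) {
    System.out.println(shouldEmit(null));                                         // false
    System.out.println(shouldEmit(Collections.emptyMap()));                       // false
    System.out.println(shouldEmit(Collections.singletonMap("triage-key", 22)));  // true
  }
}
```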

http://git-wip-us.apache.org/repos/asf/metron/blob/62d1a1bf/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileSplitterBolt.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileSplitterBolt.java b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileSplitterBolt.java
index a92a432..f28411f 100644
--- a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileSplitterBolt.java
+++ b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileSplitterBolt.java
@@ -190,6 +190,11 @@ public class ProfileSplitterBolt extends ConfiguredProfilerBolt {
 
       Values values = createValues(message, timestamp, route);
       collector.emit(input, values);
+
+      LOG.debug("Found route for message; profile={}, entity={}, timestamp={}",
+              route.getProfileDefinition().getProfile(),
+              route.getEntity(),
+              timestamp);
     }
 
     LOG.debug("Found {} route(s) for message with timestamp={}", routes.size(), timestamp);

http://git-wip-us.apache.org/repos/asf/metron/blob/62d1a1bf/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/HBaseEmitterTest.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/HBaseEmitterTest.java b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/HBaseEmitterTest.java
new file mode 100644
index 0000000..35ca4d9
--- /dev/null
+++ b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/HBaseEmitterTest.java
@@ -0,0 +1,120 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.metron.profiler.bolt;
+
+import org.adrianwalker.multilinestring.Multiline;
+import org.apache.metron.common.configuration.profiler.ProfileConfig;
+import org.apache.metron.common.utils.JSONUtils;
+import org.apache.metron.profiler.ProfileMeasurement;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.tuple.Values;
+import org.json.simple.JSONObject;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mockito;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+/**
+ * Tests the HBaseEmitter class.
+ */
+public class HBaseEmitterTest {
+
+  /**
+   * {
+   *   "profile": "profile-one",
+   *   "foreach": "ip_src_addr",
+   *   "init":   { "x": "0" },
+   *   "update": { "x": "x + 1" },
+   *   "result": "x"
+   * }
+   */
+  @Multiline
+  private String profileDefinition;
+
+  private HBaseEmitter emitter;
+  private ProfileConfig profile;
+  private OutputCollector collector;
+
+  @Before
+  public void setup() throws Exception {
+    emitter = new HBaseEmitter();
+    profile = createDefinition(profileDefinition);
+    collector = Mockito.mock(OutputCollector.class);
+  }
+
+  /**
+   * The handler should emit a message containing the result of executing
+   * the 'result/profile' expression.
+   */
+  @Test
+  public void testEmit() throws Exception {
+
+    // create a measurement that has triage values
+    ProfileMeasurement measurement = new ProfileMeasurement()
+            .withProfileName("profile")
+            .withEntity("entity")
+            .withPeriod(20000, 15, TimeUnit.MINUTES)
+            .withDefinition(profile)
+            .withProfileValue(22);
+
+    // execute the test
+    emitter.emit(measurement, collector);
+
+    // the measurement should be emitted as-is
+    ProfileMeasurement actual = expectMeasurement(emitter, collector);
+    assertEquals(measurement, actual);
+  }
+
+  /**
+   * Verifies that the emitter does emit a {@code ProfileMeasurement}.
+   *
+   * @return The {@code ProfileMeasurement} that was emitted
+   */
+  private ProfileMeasurement expectMeasurement(HBaseEmitter hbaseEmitter, OutputCollector collector) {
+
+    ArgumentCaptor<Values> arg = ArgumentCaptor.forClass(Values.class);
+    verify(collector, times(1)).emit(eq(hbaseEmitter.getStreamId()), arg.capture());
+    Values values = arg.getValue();
+    assertTrue(values.get(0) instanceof ProfileMeasurement);
+    return (ProfileMeasurement) values.get(0);
+  }
+
+  /**
+   * Creates a profile definition based on a string of JSON.
+   * @param json The string of JSON.
+   */
+  private ProfileConfig createDefinition(String json) throws IOException {
+    return JSONUtils.INSTANCE.load(json, ProfileConfig.class);
+  }
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/62d1a1bf/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/KafkaEmitterTest.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/KafkaEmitterTest.java b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/KafkaEmitterTest.java
index b02e377..95a2d29 100644
--- a/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/KafkaEmitterTest.java
+++ b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/KafkaEmitterTest.java
@@ -43,6 +43,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.eq;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -58,54 +59,128 @@ public class KafkaEmitterTest {
    *   "foreach": "ip_src_addr",
    *   "init":   { "x": "0" },
    *   "update": { "x": "x + 1" },
-   *   "result": "x"
+   *   "result": {
+   *      "profile": "x",
+   *      "triage": {
+   *        "value": "x"
+   *       }
+   *    }
    * }
    */
   @Multiline
-  private String profileDefinition;
+  private String profileDefinitionWithTriage;
 
-  private KafkaEmitter handler;
+  private KafkaEmitter kafkaEmitter;
   private ProfileConfig profile;
   private OutputCollector collector;
 
   @Before
   public void setup() throws Exception {
-    handler = new KafkaEmitter();
-    profile = createDefinition(profileDefinition);
+    kafkaEmitter = new KafkaEmitter();
+    profile = createDefinition(profileDefinitionWithTriage);
     collector = Mockito.mock(OutputCollector.class);
   }
 
   /**
-   * The handler must serialize the ProfileMeasurement into a JSONObject.
+   * The handler should emit a message when one or more result/triage expressions have been defined.
    */
   @Test
-  public void testSerialization() throws Exception {
+  public void testEmit() throws Exception {
 
+    // create a measurement that has triage values
+    ProfileMeasurement measurement = new ProfileMeasurement()
+            .withProfileName("profile")
+            .withEntity("entity")
+            .withPeriod(20000, 15, TimeUnit.MINUTES)
+            .withDefinition(profile)
+            .withTriageValues(Collections.singletonMap("triage-key", "triage-value"));
+
+    // execute the test
+    kafkaEmitter.emit(measurement, collector);
+
+    // a message should be emitted
+    verify(collector, times(1)).emit(eq(kafkaEmitter.getStreamId()), any());
+  }
+
+  /**
+   * The handler should NOT emit a message when there are NO result/triage values.
+   */
+  @Test
+  public void testDoNotEmit() throws Exception {
+
+    // create a measurement with NO triage values
     ProfileMeasurement measurement = new ProfileMeasurement()
             .withProfileName("profile")
             .withEntity("entity")
             .withPeriod(20000, 15, TimeUnit.MINUTES)
-            .withTriageValues(Collections.singletonMap("triage-key", "triage-value"))
             .withDefinition(profile);
-    handler.emit(measurement, collector);
 
-    ArgumentCaptor<Values> arg = ArgumentCaptor.forClass(Values.class);
-    verify(collector, times(1)).emit(eq(handler.getStreamId()), arg.capture());
+    // execute the test
+    kafkaEmitter.emit(measurement, collector);
 
-    // expect a JSONObject
-    Values values = arg.getValue();
-    assertTrue(values.get(0) instanceof JSONObject);
+    // a message should NOT be emitted
+    verify(collector, times(0)).emit(eq(kafkaEmitter.getStreamId()), any());
+  }
 
-    // validate the json
-    JSONObject actual = (JSONObject) values.get(0);
-    assertEquals(measurement.getDefinition().getProfile(), actual.get("profile"));
-    assertEquals(measurement.getEntity(), actual.get("entity"));
-    assertEquals(measurement.getPeriod().getPeriod(), actual.get("period"));
-    assertEquals(measurement.getPeriod().getStartTimeMillis(), actual.get("period.start"));
-    assertEquals(measurement.getPeriod().getEndTimeMillis(), actual.get("period.end"));
-    assertEquals(measurement.getTriageValues().get("triage-key"), actual.get("triage-key"));
+  /**
+   * Validate that the message generated for Kafka includes the triage value.
+   */
+  @Test
+  public void testTriageValueInMessage() throws Exception {
+
+    // create a measurement that has triage values
+    ProfileMeasurement measurement = new ProfileMeasurement()
+            .withDefinition(profile)
+            .withProfileName(profile.getProfile())
+            .withEntity("entity")
+            .withPeriod(20000, 15, TimeUnit.MINUTES)
+            .withTriageValues(Collections.singletonMap("triage-key", "triage-value"));
+
+    // execute the test
+    kafkaEmitter.emit(measurement, collector);
+    JSONObject actual = expectJsonObject(kafkaEmitter, collector);
+
+    // validate the core parts of the message
+    assertEquals(measurement.getProfileName(),                    actual.get("profile"));
+    assertEquals(measurement.getEntity(),                         actual.get("entity"));
+    assertEquals(measurement.getPeriod().getPeriod(),             actual.get("period"));
+    assertEquals(measurement.getPeriod().getStartTimeMillis(),    actual.get("period.start"));
+    assertEquals(measurement.getPeriod().getEndTimeMillis(),      actual.get("period.end"));
+    assertEquals("profiler",                                      actual.get("source.type"));
     assertNotNull(actual.get("timestamp"));
-    assertEquals("profiler", actual.get("source.type"));
+
+    // validate that the triage value has been added
+    assertEquals(measurement.getTriageValues().get("triage-key"), actual.get("triage-key"));
+  }
+
+  /**
+   * Validate that the message generated for Kafka can include multiple triage values.
+   */
+  @Test
+  public void testMultipleTriageValueInMessage() throws Exception {
+
+    // multiple triage values have been defined
+    Map<String, Object> triageValues = ImmutableMap.of(
+            "x", 2,
+            "y", "4",
+            "z", 6.0);
+
+    // create a measurement that has multiple triage values
+    ProfileMeasurement measurement = new ProfileMeasurement()
+            .withDefinition(profile)
+            .withProfileName(profile.getProfile())
+            .withEntity("entity")
+            .withPeriod(20000, 15, TimeUnit.MINUTES)
+            .withTriageValues(triageValues);
+
+    // execute the test
+    kafkaEmitter.emit(measurement, collector);
+    JSONObject actual = expectJsonObject(kafkaEmitter, collector);
+
+    // validate that ALL of the triage values have been added
+    assertEquals(measurement.getTriageValues().get("x"), actual.get("x"));
+    assertEquals(measurement.getTriageValues().get("y"), actual.get("y"));
+    assertEquals(measurement.getTriageValues().get("z"), actual.get("z"));
   }
 
   /**
@@ -120,30 +195,27 @@ public class KafkaEmitterTest {
             "invalid", new OnlineStatisticsProvider(),
             "valid", 4);
 
+    // create the measurement with a Map as a triage value; this is not allowed
     ProfileMeasurement measurement = new ProfileMeasurement()
-            .withProfileName("profile")
+            .withDefinition(profile)
+            .withProfileName(profile.getProfile())
             .withEntity("entity")
             .withPeriod(20000, 15, TimeUnit.MINUTES)
-            .withTriageValues(triageValues)
-            .withDefinition(profile);
-    handler.emit(measurement, collector);
+            .withTriageValues(triageValues);
 
-    ArgumentCaptor<Values> arg = ArgumentCaptor.forClass(Values.class);
-    verify(collector, times(1)).emit(eq(handler.getStreamId()), arg.capture());
-    Values values = arg.getValue();
-    assertTrue(values.get(0) instanceof JSONObject);
+    // execute the test
+    kafkaEmitter.emit(measurement, collector);
+    JSONObject actual = expectJsonObject(kafkaEmitter, collector);
 
-    // only the triage expression value itself should have been skipped, all others should be there
-    JSONObject actual = (JSONObject) values.get(0);
-    assertEquals(measurement.getDefinition().getProfile(), actual.get("profile"));
-    assertEquals(measurement.getEntity(), actual.get("entity"));
-    assertEquals(measurement.getPeriod().getPeriod(), actual.get("period"));
-    assertEquals(measurement.getPeriod().getStartTimeMillis(), actual.get("period.start"));
-    assertEquals(measurement.getPeriod().getEndTimeMillis(), actual.get("period.end"));
-    assertNotNull(actual.get("timestamp"));
-    assertEquals("profiler", actual.get("source.type"));
+    // validate the core parts of the message still exist
+    assertEquals(measurement.getProfileName(),                    actual.get("profile"));
+    assertEquals(measurement.getEntity(),                         actual.get("entity"));
+    assertEquals(measurement.getPeriod().getPeriod(),             actual.get("period"));
+    assertEquals(measurement.getPeriod().getStartTimeMillis(),    actual.get("period.start"));
+    assertEquals(measurement.getPeriod().getEndTimeMillis(),      actual.get("period.end"));
+    assertEquals("profiler",                                      actual.get("source.type"));
 
-    // the invalid expression should be skipped due to invalid type
+    // the invalid expression should be skipped and not included in the message
     assertFalse(actual.containsKey("invalid"));
 
     // but the valid expression should still be there
@@ -156,19 +228,18 @@ public class KafkaEmitterTest {
    */
   @Test
   public void testIntegerIsValidType() throws Exception {
+
+    // create a measurement with a triage value that is an integer
     ProfileMeasurement measurement = new ProfileMeasurement()
-            .withProfileName("profile")
+            .withDefinition(profile)
+            .withProfileName(profile.getProfile())
             .withEntity("entity")
             .withPeriod(20000, 15, TimeUnit.MINUTES)
-            .withTriageValues(Collections.singletonMap("triage-key", 123))
-            .withDefinition(profile);
-    handler.emit(measurement, collector);
+            .withTriageValues(Collections.singletonMap("triage-key", 123));
 
-    ArgumentCaptor<Values> arg = ArgumentCaptor.forClass(Values.class);
-    verify(collector, times(1)).emit(eq(handler.getStreamId()), arg.capture());
-    Values values = arg.getValue();
-    assertTrue(values.get(0) instanceof JSONObject);
-    JSONObject actual = (JSONObject) values.get(0);
+    // execute the test
+    kafkaEmitter.emit(measurement, collector);
+    JSONObject actual = expectJsonObject(kafkaEmitter, collector);
 
     // the triage expression is valid
     assertEquals(measurement.getTriageValues().get("triage-key"), actual.get("triage-key"));
@@ -180,25 +251,37 @@ public class KafkaEmitterTest {
    */
   @Test
   public void testStringIsValidType() throws Exception {
+
+    // create a measurement with a triage value that is a string
     ProfileMeasurement measurement = new ProfileMeasurement()
-            .withProfileName("profile")
+            .withDefinition(profile)
+            .withProfileName(profile.getProfile())
             .withEntity("entity")
             .withPeriod(20000, 15, TimeUnit.MINUTES)
-            .withTriageValues(Collections.singletonMap("triage-key", "value"))
-            .withDefinition(profile);
-    handler.emit(measurement, collector);
+            .withTriageValues(Collections.singletonMap("triage-key", "value"));
 
-    ArgumentCaptor<Values> arg = ArgumentCaptor.forClass(Values.class);
-    verify(collector, times(1)).emit(eq(handler.getStreamId()), arg.capture());
-    Values values = arg.getValue();
-    assertTrue(values.get(0) instanceof JSONObject);
-    JSONObject actual = (JSONObject) values.get(0);
+    // execute the test
+    kafkaEmitter.emit(measurement, collector);
+    JSONObject actual = expectJsonObject(kafkaEmitter, collector);
 
     // the triage expression is valid
     assertEquals(measurement.getTriageValues().get("triage-key"), actual.get("triage-key"));
   }
 
   /**
+   * Verifies that the KafkaEmitter does emit a JSONObject.
+   * @return The JSONObject that was emitted
+   */
+  private JSONObject expectJsonObject(KafkaEmitter kafkaEmitter, OutputCollector collector) {
+
+    ArgumentCaptor<Values> arg = ArgumentCaptor.forClass(Values.class);
+    verify(collector, times(1)).emit(eq(kafkaEmitter.getStreamId()), arg.capture());
+    Values values = arg.getValue();
+    assertTrue(values.get(0) instanceof JSONObject);
+    return (JSONObject) values.get(0);
+  }
+
+  /**
    * Creates a profile definition based on a string of JSON.
    * @param json The string of JSON.
    */


[09/50] [abbrv] metron git commit: METRON-1483: Create a tool to monitor performance of the topologies closes apache/incubator-metron#958

Posted by rm...@apache.org.
METRON-1483: Create a tool to monitor performance of the topologies closes apache/incubator-metron#958


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/46ad9d93
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/46ad9d93
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/46ad9d93

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 46ad9d93b4385da0f8668f2ba84212d54d00ba4b
Parents: e3eeec3
Author: cstella <ce...@gmail.com>
Authored: Tue Mar 20 09:36:32 2018 -0400
Committer: cstella <ce...@gmail.com>
Committed: Tue Mar 20 09:36:32 2018 -0400

----------------------------------------------------------------------
 metron-contrib/metron-performance/README.md     | 205 ++++++++
 .../performance_measurement.png                 | Bin 0 -> 5790 bytes
 metron-contrib/metron-performance/pom.xml       | 134 +++++
 .../src/main/assembly/assembly.xml              |  42 ++
 .../metron/performance/load/LoadGenerator.java  | 175 +++++++
 .../metron/performance/load/LoadOptions.java    | 499 +++++++++++++++++++
 .../performance/load/MessageGenerator.java      |  48 ++
 .../metron/performance/load/SendToKafka.java    | 107 ++++
 .../load/monitor/AbstractMonitor.java           |  49 ++
 .../load/monitor/EPSGeneratedMonitor.java       |  53 ++
 .../monitor/EPSThroughputWrittenMonitor.java    |  77 +++
 .../performance/load/monitor/MonitorNaming.java |  23 +
 .../performance/load/monitor/MonitorTask.java   |  44 ++
 .../performance/load/monitor/Results.java       |  51 ++
 .../load/monitor/writers/CSVWriter.java         |  67 +++
 .../load/monitor/writers/ConsoleWriter.java     |  65 +++
 .../load/monitor/writers/Writable.java          |  40 ++
 .../load/monitor/writers/Writer.java            |  86 ++++
 .../performance/sampler/BiasedSampler.java      | 113 +++++
 .../metron/performance/sampler/Sampler.java     |  24 +
 .../performance/sampler/UnbiasedSampler.java    |  28 ++
 .../metron/performance/util/KafkaUtil.java      |  56 +++
 .../src/main/scripts/load_tool.sh               |  36 ++
 .../performance/load/LoadOptionsTest.java       |  93 ++++
 .../performance/load/SendToKafkaTest.java       |  49 ++
 .../metron/performance/sampler/SamplerTest.java | 145 ++++++
 metron-contrib/pom.xml                          |  15 +
 .../common-services/METRON/CURRENT/metainfo.xml |   4 +
 .../packaging/docker/deb-docker/pom.xml         |   6 +
 .../docker/rpm-docker/SPECS/metron.spec         |  21 +
 .../packaging/docker/rpm-docker/pom.xml         |   6 +
 31 files changed, 2361 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/README.md
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/README.md b/metron-contrib/metron-performance/README.md
new file mode 100644
index 0000000..8981349
--- /dev/null
+++ b/metron-contrib/metron-performance/README.md
@@ -0,0 +1,205 @@
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+# Performance Utilities
+
+This project creates some useful performance monitoring and measurement
+utilities.
+
+## `load-tool.sh`
+
+The Load tool is intended to do the following:
+* Generate a load at a specific events per second into kafka
+  * The messages are taken from a template file, where there is a message template per line
+  * The load can be biased (e.g. 80% of the load can be comprised of 20% of the templates)
+* Monitor the kafka offsets for a topic to determine the events per second written
+  * This could be the topic that you are generating load on
+  * This could be another topic that represents the output of some topology (e.g. generate load on `enrichments` and monitor `indexing` to determine the throughput of the enrichment topology).
+
+```
+usage: Generator
+ -bs,--sample_bias <BIAS_FILE>         The discrete distribution to bias
+                                       the sampling. This is a CSV of 2
+                                       columns.  The first column is the %
+                                       of the templates and the 2nd column
+                                       is the probability (0-100) that
+                                       it's chosen.  For instance:
+                                       20,80
+                                       80,20
+                                       implies that 20% of the templates
+                                       will comprise 80% of the output and
+                                       the remaining 80% of the templates
+                                       will comprise 20% of the output.
+ -c,--csv <CSV_FILE>                   A CSV file to emit monitoring data
+                                       to.  The format is a CSV with the
+                                       following schema: timestamp, (name,
+                                       eps, historical_mean,
+                                       historical_stddev)+
+ -cg,--consumer_group <GROUP_ID>       Consumer Group.  The default is
+                                       load.group
+ -e,--eps <EPS>                        The target events per second
+ -h,--help                             Generate Help screen
+ -k,--kafka_config <CONFIG_FILE>       The kafka config.  This is a file
+                                       containing a JSON map with the
+                                       kafka config.
+ -l,--lookback <LOOKBACK>              When summarizing, how many
+                                       monitoring periods should we
+                                       summarize over?  If 0, then no
+                                       summary.  Default: 5
+ -md,--monitor_delta_ms <TIME_IN_MS>   The time (in ms) between monitoring
+                                       output. Default is 10000
+ -mt,--monitor_topic <TOPIC>           The kafka topic to monitor.
+ -ot,--output_topic <TOPIC>            The kafka topic to write to
+ -p,--threads <NUM_THREADS>            The number of threads to use when
+                                       extracting data.  The default is
+                                       the number of cores of your
+                                       machine.
+ -sd,--send_delta_ms <TIME_IN_MS>      The time (in ms) between sending a
+                                       batch of messages. Default is 100
+ -t,--template <TEMPLATE_FILE>         The template file to use for
+                                       generation.  This should be a file
+                                       with a template per line with
+                                       $METRON_TS and $METRON_GUID in the
+                                       spots for timestamp and guid, if
+                                       you so desire them.
+ -tl,--time_limit_ms <MS>              The total amount of time to run
+                                       this in milliseconds.  By default,
+                                       it never stops.
+ -z,--zk_quorum <QUORUM>               zookeeper quorum
+```
+
+## Templates
+Messages are drawn from a template file.  A template file has a message template per line.  
+For instance, let's say we want to generate JSON maps with fields: `source.type`, `ip_src_addr` 
+and `ip_dst_addr`.  We can generate a template file with a template like the following per line:
+```
+{ "source.type" : "asa", "ip_src_addr" : "127.0.0.1", "ip_dst_addr" : "191.168.1.1" }
+```
+
+When messages are generated, there are some special replacements that can be used: `$METRON_TS` and `$METRON_GUID`.
+We can adjust our previous template to use these like so:
+```
+{ "source.type" : "asa", "ip_src_addr" : "127.0.0.1", "ip_dst_addr" : "191.168.1.1", "timestamp" : $METRON_TS, "guid" : "$METRON_GUID" }
+```
+One note about generated GUIDs: we do not generate globally unique UUIDs; they are unique only within the context of a given generator run.
+
+## Biased Sampling
+
+This load tool can be configured to use biased sampling.  This is useful if, for instance, you are trying to model data which is not distributed
+uniformly, like many types of network data.  Generating synthetic data with a distribution similar to your real data exercises the caches
+in the same way and yields a more realistic scenario.
+
+You specify the biases in a csv file with 2 columns:
+* The first column represents the % of the templates
+* The second column represents the % of the generated output. 
+
+A simple example would be to generate samples based on Pareto's principle:
+```
+20,80
+80,20
+``` 
+This means that the first 20% of the templates in the template file would comprise 80% of the output.
+
+A more complex example might be:
+```
+20,80
+20,5
+50,1
+10,14
+``` 
+This would imply:
+* The first 20% of the templates would comprise 80% of the output
+* The next 20% of the templates would comprise 5% of the output
+* The next 50% of the templates would comprise 1% of the output
+* The next 10% of the templates would comprise 14% of the output.
+
+## CSV Output
+
+For those who would prefer a different visualization or wish to incorporate the output of this tool into an automated test,
+you can specify a file to which monitoring data is emitted in CSV format via the `-c` or `--csv` option.
+
+The CSV columns are as follows:
+* timestamp in epoch millis
+
+If you are generating synthetic data, then:
+* "generated"
+* The events per second generated
+* The mean of the events per second generated for the last `k` runs, where `k` is the lookback (set via `-l` and defaulted to `5`)
+* The standard deviation of the events per second generated for the last `k` runs, where `k` is the lookback (set via `-l` and defaulted to `5`)
+
+If you are monitoring a topic, then:
+* "throughput measured"
+* The events per second measured
+* The mean of the events per second measured for the last `k` runs, where `k` is the lookback (set via `-l` and defaulted to `5`)
+* The standard deviation of the events per second measured for the last `k` runs, where `k` is the lookback (set via `-l` and defaulted to `5`)
+
+If you are both generating load and monitoring the throughput of a topic, then all of the columns are included.
+
+An example of CSV output is:
+```
+1520506955047,generated,,,,throughput measured,,,
+1520506964896,generated,1045,1045,0,throughput measured,,,
+1520506974896,generated,1000,1022,31,throughput measured,1002,1002,0
+1520506984904,generated,999,1014,26,throughput measured,999,1000,2
+1520506994896,generated,1000,1011,22,throughput measured,1000,1000,1
+1520507004896,generated,1000,1008,20,throughput measured,1000,1000,1
+```
+
+## Use-cases for the Load Tool
+
+### Measure Throughput of a Topology
+
+One can use the load tool to monitor performance of a kafka-to-kafka topology.
+For instance, we could monitor the throughput of the enrichment topology by monitoring the `enrichments` kafka topic:
+```
+$METRON_HOME/bin/load_tool.sh -mt enrichments -z $ZOOKEEPER
+```
+
+### Generate Synthetic Load and Measure Performance
+
+One can use the load tool to generate synthetic load and monitor performance of a kafka-to-kafka topology.  For instance, we could
+monitor the performance of the enrichment topology.  It is advised to run the enrichment topology against a new input topic and write
+to a new output topic so as not to pollute your downstream indices.  For instance, we could create a kafka topic called
+`enrichments_load` by generating load on it.  We could also create a new kafka topic called `indexing_load` and configure the enrichment
+topology to output to it.  We would then generate load on `enrichments_load` and monitor `indexing_load`.
+```
+#Thread pool of size 5; you want somewhere between 5 and 10 depending on the throughput you're trying to drive
+#Messages drawn from ~/dummy.templates, which is a message template per line
+#Generate at a rate of 9000 messages per second
+#Emit the data to a CSV file ~/measurements.csv
+$METRON_HOME/bin/load_tool.sh -p 5 -ot enrichments_load -mt indexing_load -t ~/dummy.templates -eps 9000 -z $ZOOKEEPER -c ~/measurements.csv
+```
+
+Now, with the help of a bash function and gnuplot we can generate a plot
+of the historical throughput measurements for `indexing_load`:
+```
+# Ensure that you have installed gnuplot and the liberation font package
+# via yum install -y gnuplot liberation-sans-fonts
+# We will define a plot function that generates a png plot.  It takes
+# one arg, the output file.  It expects a 2-column CSV on stdin,
+# with the first column being the timestamp and the second column
+# being the value you want plotted.
+plot() {
+  awk -F, '{printf "%d %d\n", $1/1000, $2} END { print "e" }' | gnuplot -e "reset;clear;set style fill solid 1.0 border -1; set nokey;set title 'Throughput Measured'; set xlabel 'Time'; set boxwidth 0.5; set xtics rotate; set ylabel 'events/sec';set xdata time; set timefmt '%s';set format x '%H:%M:%S';set term png enhanced font '/usr/share/fonts/liberation/LiberationSans-Regular.ttf' 12 size 900,400; set output '$1';plot '< cat -' using 1:2 with line lt -1 lw 2;"
+}
+
+# Extract the timestamp and the mean measured throughput (the 8th CSV
+# column) as a 2-column CSV to feed the plot function.
+cat ~/measurements.csv | awk -F, '{printf "%d,%d\n", $1, $8 }' | plot performance_measurement.png
+```
+This generates a plot like so to `performance_measurement.png`:
+![Performance Measurement](performance_measurement.png)

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/performance_measurement.png
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/performance_measurement.png b/metron-contrib/metron-performance/performance_measurement.png
new file mode 100644
index 0000000..c4dcfb1
Binary files /dev/null and b/metron-contrib/metron-performance/performance_measurement.png differ

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/pom.xml
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/pom.xml b/metron-contrib/metron-performance/pom.xml
new file mode 100644
index 0000000..4242110
--- /dev/null
+++ b/metron-contrib/metron-performance/pom.xml
@@ -0,0 +1,134 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software
+	Foundation (ASF) under one or more contributor license agreements. See the
+	NOTICE file distributed with this work for additional information regarding
+	copyright ownership. The ASF licenses this file to You under the Apache License,
+	Version 2.0 (the "License"); you may not use this file except in compliance
+	with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+	Unless required by applicable law or agreed to in writing, software distributed
+	under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
+	OR CONDITIONS OF ANY KIND, either express or implied. See the License for
+  the specific language governing permissions and limitations under the License.
+  -->
+
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <name>metron-performance</name>
+  <groupId>org.apache.metron</groupId>
+  <artifactId>metron-performance</artifactId>
+  <packaging>jar</packaging>
+  <parent>
+    <groupId>org.apache.metron</groupId>
+    <artifactId>metron-contrib</artifactId>
+    <version>0.4.3</version>
+  </parent>
+  <description>Performance Testing Utilities</description>
+  <url>https://metron.apache.org/</url>
+
+  <dependencies>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <version>${global_guava_version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.metron</groupId>
+      <artifactId>metron-common</artifactId>
+      <version>${project.parent.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.kafka</groupId>
+      <artifactId>kafka_2.10</artifactId>
+      <version>${global_kafka_version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <version>4.12</version>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+  <build>
+    <plugins>
+
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-shade-plugin</artifactId>
+        <version>${global_shade_version}</version>
+        <configuration>
+          <createDependencyReducedPom>true</createDependencyReducedPom>
+          <artifactSet>
+            <excludes>
+              <exclude>*slf4j*</exclude>
+            </excludes>
+          </artifactSet>
+        </configuration>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <goals>
+              <goal>shade</goal>
+            </goals>
+            <configuration>
+              <filters>
+                <filter>
+                  <artifact>*:*</artifact>
+                  <excludes>
+                    <exclude>META-INF/*.SF</exclude>
+                    <exclude>META-INF/*.DSA</exclude>
+                    <exclude>META-INF/*.RSA</exclude>
+                  </excludes>
+                </filter>
+              </filters>
+              <relocations>
+                <relocation>
+                  <pattern>com.google.common</pattern>
+                  <shadedPattern>org.apache.metron.perf.guava</shadedPattern>
+                </relocation>
+              </relocations>
+              <transformers>
+                <transformer implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
+                  <resources>
+                    <resource>.yaml</resource>
+                    <resource>LICENSE.txt</resource>
+                    <resource>ASL2.0</resource>
+                    <resource>NOTICE.txt</resource>
+                  </resources>
+                </transformer>
+                <!-- UNCOMMENT THIS IF YOU NEED TO REGENERATE THE BEST GUESS NOTICES FILE WHICH REQUIRES PRUNING EVERY RELEASE -->
+                <!--transformer implementation="org.apache.maven.plugins.shade.resource.ApacheNoticeResourceTransformer">
+                                <addHeader>false</addHeader>
+                                <projectName>${project.name}</projectName>
+                </transformer-->
+                <transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer" />
+                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+                  <mainClass></mainClass>
+                </transformer>
+              </transformers>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <descriptor>src/main/assembly/assembly.xml</descriptor>
+        </configuration>
+        <executions>
+          <execution>
+            <id>make-assembly</id> <!-- this is used for inheritance merges -->
+            <phase>package</phase> <!-- bind to the packaging phase -->
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/main/assembly/assembly.xml
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/main/assembly/assembly.xml b/metron-contrib/metron-performance/src/main/assembly/assembly.xml
new file mode 100644
index 0000000..3595284
--- /dev/null
+++ b/metron-contrib/metron-performance/src/main/assembly/assembly.xml
@@ -0,0 +1,42 @@
+<!--
+  Licensed to the Apache Software
+	Foundation (ASF) under one or more contributor license agreements. See the
+	NOTICE file distributed with this work for additional information regarding
+	copyright ownership. The ASF licenses this file to You under the Apache License,
+	Version 2.0 (the "License"); you may not use this file except in compliance
+	with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+	Unless required by applicable law or agreed to in writing, software distributed
+	under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
+	OR CONDITIONS OF ANY KIND, either express or implied. See the License for
+  the specific language governing permissions and limitations under the License.
+  -->
+
+<assembly>
+  <id>archive</id>
+  <formats>
+    <format>tar.gz</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+  <fileSets>
+    <fileSet>
+      <directory>${project.basedir}/src/main/scripts</directory>
+      <outputDirectory>bin</outputDirectory>
+      <useDefaultExcludes>true</useDefaultExcludes>
+      <excludes>
+        <exclude>**/*.formatted</exclude>
+        <exclude>**/*.filtered</exclude>
+      </excludes>
+      <fileMode>0755</fileMode>
+      <lineEnding>unix</lineEnding>
+      <filtered>true</filtered>
+    </fileSet>
+    <fileSet>
+      <directory>${project.basedir}/target</directory>
+      <includes>
+        <include>${project.artifactId}-${project.version}.jar</include>
+      </includes>
+      <outputDirectory>lib</outputDirectory>
+      <useDefaultExcludes>true</useDefaultExcludes>
+    </fileSet>
+  </fileSets>
+</assembly>

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/LoadGenerator.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/LoadGenerator.java b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/LoadGenerator.java
new file mode 100644
index 0000000..33f777b
--- /dev/null
+++ b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/LoadGenerator.java
@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.load;
+
+
+import com.google.common.base.Joiner;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.PosixParser;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.apache.metron.common.utils.KafkaUtils;
+import org.apache.metron.performance.load.monitor.AbstractMonitor;
+import org.apache.metron.performance.load.monitor.EPSGeneratedMonitor;
+import org.apache.metron.performance.load.monitor.EPSThroughputWrittenMonitor;
+import org.apache.metron.performance.load.monitor.MonitorTask;
+import org.apache.metron.performance.load.monitor.writers.CSVWriter;
+import org.apache.metron.performance.load.monitor.writers.ConsoleWriter;
+import org.apache.metron.performance.load.monitor.writers.Writable;
+import org.apache.metron.performance.load.monitor.writers.Writer;
+import org.apache.metron.performance.sampler.BiasedSampler;
+import org.apache.metron.performance.sampler.Sampler;
+import org.apache.metron.performance.sampler.UnbiasedSampler;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Consumer;
+
+public class LoadGenerator
+{
+  public static String CONSUMER_GROUP = "metron.load.group";
+  public static long SEND_PERIOD_MS = 100;
+  public static long MONITOR_PERIOD_MS = 1000*10;
+  private static ExecutorService pool;
+  private static ThreadLocal<KafkaProducer<String, String>> kafkaProducer;
+  public static AtomicLong numSent = new AtomicLong(0);
+
+  public static void main( String[] args ) throws Exception {
+    CommandLine cli = LoadOptions.parse(new PosixParser(), args);
+    EnumMap<LoadOptions, Optional<Object>> evaluatedArgs = LoadOptions.createConfig(cli);
+    Map<String, Object> kafkaConfig = new HashMap<>();
+    kafkaConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
+    kafkaConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
+    kafkaConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
+    kafkaConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
+    if(LoadOptions.ZK.has(cli)) {
+      String zkQuorum = (String) evaluatedArgs.get(LoadOptions.ZK).get();
+      kafkaConfig.put( ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG
+                     , Joiner.on(",").join(KafkaUtils.INSTANCE.getBrokersFromZookeeper(zkQuorum))
+                     );
+    }
+    String groupId = evaluatedArgs.get(LoadOptions.CONSUMER_GROUP).get().toString();
+    System.out.println("Consumer Group: " + groupId);
+    kafkaConfig.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
+    if(LoadOptions.KAFKA_CONFIG.has(cli)) {
+      kafkaConfig.putAll((Map<String, Object>) evaluatedArgs.get(LoadOptions.KAFKA_CONFIG).get());
+    }
+    kafkaProducer = ThreadLocal.withInitial(() -> new KafkaProducer<>(kafkaConfig));
+    int numThreads = (int)evaluatedArgs.get(LoadOptions.NUM_THREADS).get();
+    System.out.println("Thread pool size: " + numThreads);
+    pool = Executors.newFixedThreadPool(numThreads);
+    Optional<Object> eps = evaluatedArgs.get(LoadOptions.EPS);
+
+    Optional<Object> outputTopic = evaluatedArgs.get(LoadOptions.OUTPUT_TOPIC);
+    Optional<Object> monitorTopic = evaluatedArgs.get(LoadOptions.MONITOR_TOPIC);
+    long sendDelta = (long) evaluatedArgs.get(LoadOptions.SEND_DELTA).get();
+    long monitorDelta = (long) evaluatedArgs.get(LoadOptions.MONITOR_DELTA).get();
+    if((eps.isPresent() && outputTopic.isPresent()) || monitorTopic.isPresent()) {
+      Timer timer = new Timer(false);
+      long startTimeMs = System.currentTimeMillis();
+      if(outputTopic.isPresent() && eps.isPresent()) {
+        List<String> templates = (List<String>)evaluatedArgs.get(LoadOptions.TEMPLATE).get();
+        if(templates.isEmpty()) {
+          System.out.println("Empty templates, so nothing to do.");
+          return;
+        }
+        Optional<Object> biases = evaluatedArgs.get(LoadOptions.BIASED_SAMPLE);
+        Sampler sampler = new UnbiasedSampler();
+        if(biases.isPresent()){
+          sampler = new BiasedSampler((List<Map.Entry<Integer, Integer>>) biases.get(), templates.size());
+        }
+        MessageGenerator generator = new MessageGenerator(templates, sampler);
+        Long targetLoad = (Long)eps.get();
+        int periodsPerSecond = (int)(1000/sendDelta);
+        long messagesPerPeriod = targetLoad/periodsPerSecond;
+        String outputTopicStr = (String)outputTopic.get();
+        System.out.println("Generating data to " + outputTopicStr + " at " + targetLoad + " events per second");
+        System.out.println("Sending " + messagesPerPeriod + " messages to " + outputTopicStr + " every " + sendDelta + "ms");
+        timer.scheduleAtFixedRate(new SendToKafka( outputTopicStr
+                                                 , messagesPerPeriod
+                                                 , numThreads
+                                                 , generator
+                                                 , pool
+                                                 , numSent
+                                                 , kafkaProducer
+                                                 )
+                                 , 0, sendDelta);
+      }
+      List<AbstractMonitor> monitors = new ArrayList<>();
+      if(outputTopic.isPresent() && monitorTopic.isPresent()) {
+        System.out.println("Monitoring " + monitorTopic.get() + " every " + monitorDelta + " ms");
+        monitors.add(new EPSGeneratedMonitor(outputTopic, numSent));
+        monitors.add(new EPSThroughputWrittenMonitor(monitorTopic, kafkaConfig));
+      }
+      else if(outputTopic.isPresent() && !monitorTopic.isPresent()) {
+        System.out.println("Monitoring " + outputTopic.get() + " every " + monitorDelta + " ms");
+        monitors.add(new EPSGeneratedMonitor(outputTopic, numSent));
+        monitors.add(new EPSThroughputWrittenMonitor(outputTopic, kafkaConfig));
+      }
+      else if(!outputTopic.isPresent() && monitorTopic.isPresent()) {
+        System.out.println("Monitoring " + monitorTopic.get() + " every " + monitorDelta + " ms");
+        monitors.add(new EPSThroughputWrittenMonitor(monitorTopic, kafkaConfig));
+      }
+      else if(!outputTopic.isPresent() && !monitorTopic.isPresent()) {
+        System.out.println("You have not specified an output topic or a monitoring topic, so I have nothing to do here.");
+      }
+      int lookback = (int) evaluatedArgs.get(LoadOptions.SUMMARY_LOOKBACK).get();
+      if(lookback > 0) {
+        System.out.println("Summarizing over the last " + lookback + " monitoring periods (" + lookback*monitorDelta + "ms)");
+      }
+      else {
+        System.out.println("Turning off summarization.");
+      }
+      final CSVWriter csvWriter = new CSVWriter((File) evaluatedArgs.get(LoadOptions.CSV).orElse(null));
+      Writer writer = new Writer(monitors, lookback, new ArrayList<Consumer<Writable>>() {{
+        add(new ConsoleWriter());
+        add(csvWriter);
+      }});
+      timer.scheduleAtFixedRate(new MonitorTask(writer), 0, monitorDelta);
+      Optional<Object> timeLimit = evaluatedArgs.get(LoadOptions.TIME_LIMIT);
+      if(timeLimit.isPresent()) {
+        System.out.println("Ending in " + timeLimit.get() + " ms.");
+        timer.schedule(new TimerTask() {
+                         @Override
+                         public void run() {
+                           timer.cancel();
+                           long durationS = (System.currentTimeMillis() - startTimeMs)/1000;
+                           System.out.println("\nGenerated " + numSent.get() + " in " + durationS + " seconds." );
+                           csvWriter.close();
+                           System.exit(0);
+                         }
+                       }
+
+                , (Long) timeLimit.get());
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/LoadOptions.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/LoadOptions.java b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/LoadOptions.java
new file mode 100644
index 0000000..b4d217d
--- /dev/null
+++ b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/LoadOptions.java
@@ -0,0 +1,499 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.load;
+
+import com.google.common.base.Joiner;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.metron.common.utils.JSONUtils;
+import org.apache.metron.common.utils.cli.CLIOptions;
+import org.apache.metron.performance.sampler.BiasedSampler;
+import org.apache.metron.stellar.common.utils.ConversionUtils;
+import org.apache.metron.common.utils.cli.OptionHandler;
+
+import javax.annotation.Nullable;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.EnumMap;
+import java.util.List;
+import java.util.Optional;
+
+public enum LoadOptions implements CLIOptions<LoadOptions> {
+  HELP(new OptionHandler<LoadOptions>() {
+
+    @Override
+    public String getShortCode() {
+      return "h";
+    }
+
+    @Nullable
+    @Override
+    public Option apply(@Nullable String s) {
+      return new Option(s, "help", false, "Generate Help screen");
+    }
+  }),
+  ZK(new OptionHandler<LoadOptions>() {
+    @Nullable
+    @Override
+    public Option apply(@Nullable String s) {
+      Option o = new Option(s, "zk_quorum", true, "zookeeper quorum");
+      o.setArgName("QUORUM");
+      o.setRequired(false);
+      return o;
+    }
+
+    @Override
+    public Optional<Object> getValue(LoadOptions option, CommandLine cli) {
+      if(option.has(cli)) {
+        return Optional.ofNullable(option.get(cli));
+      }
+      else {
+        return Optional.empty();
+      }
+    }
+
+    @Override
+    public String getShortCode() {
+      return "z";
+    }
+  }),
+  CONSUMER_GROUP(new OptionHandler<LoadOptions>() {
+    @Nullable
+    @Override
+    public Option apply(@Nullable String s) {
+      Option o = new Option(s, "consumer_group", true, "Consumer Group.  The default is " + LoadGenerator.CONSUMER_GROUP);
+      o.setArgName("GROUP_ID");
+      o.setRequired(false);
+      return o;
+    }
+
+    @Override
+    public Optional<Object> getValue(LoadOptions option, CommandLine cli) {
+      if(option.has(cli)) {
+        return Optional.ofNullable(option.get(cli));
+      }
+      else {
+        return Optional.of(LoadGenerator.CONSUMER_GROUP);
+      }
+    }
+
+    @Override
+    public String getShortCode() {
+      return "cg";
+    }
+  }),
+  BIASED_SAMPLE(new OptionHandler<LoadOptions>() {
+    @Nullable
+    @Override
+    public Option apply(@Nullable String s) {
+      Option o = new Option(s, "sample_bias", true, "The discrete distribution to bias the sampling. " +
+              "This is a CSV of 2 columns.  The first column is the % of the templates " +
+              "and the 2nd column is the probability (0-100) that it's chosen.  For instance:\n" +
+              "  20,80\n" +
+              "  80,20\n" +
+              "implies that 20% of the templates will comprise 80% of the output and the remaining 80% of the templates will comprise 20% of the output.");
+      o.setArgName("BIAS_FILE");
+      o.setRequired(false);
+      return o;
+    }
+
+    @Override
+    public Optional<Object> getValue(LoadOptions option, CommandLine cli) {
+      if(!option.has(cli)) {
+        return Optional.empty();
+      }
+      File discreteDistributionFile  = new File(option.get(cli));
+      if(discreteDistributionFile.exists()) {
+        try (BufferedReader br = new BufferedReader(new FileReader(discreteDistributionFile))){
+          return Optional.ofNullable(BiasedSampler.readDistribution(br));
+        } catch (IOException e) {
+          throw new IllegalStateException("Unable to read distribution file: " + option.get(cli), e);
+        }
+      }
+      else {
+        throw new IllegalStateException("Unable to read distribution file: " + option.get(cli) + " file doesn't exist.");
+      }
+    }
+
+    @Override
+    public String getShortCode() {
+      return "bs";
+    }
+  })
+  ,CSV(new OptionHandler<LoadOptions>() {
+    @Nullable
+    @Override
+    public Option apply(@Nullable String s) {
+      Option o = new Option(s, "csv", true, "A CSV file to emit monitoring data to.  " +
+              "The format is a CSV with the following schema: timestamp, (name, eps, historical_mean, historical_stddev)+");
+      o.setArgName("CSV_FILE");
+      o.setRequired(false);
+      return o;
+    }
+
+    @Override
+    public Optional<Object> getValue(LoadOptions option, CommandLine cli) {
+      if(!option.has(cli)) {
+        return Optional.empty();
+      }
+      return Optional.of(new File(option.get(cli)));
+    }
+
+    @Override
+    public String getShortCode() {
+      return "c";
+    }
+  })
+  ,TEMPLATE(new OptionHandler<LoadOptions>() {
+    @Nullable
+    @Override
+    public Option apply(@Nullable String s) {
+      Option o = new Option(s, "template", true, "The template file to use for generation.  This should be a file with a template per line with $METRON_TS and $METRON_GUID in the spots for timestamp and guid, if you so desire them.");
+      o.setArgName("TEMPLATE_FILE");
+      o.setRequired(false);
+      return o;
+    }
+
+    @Override
+    public Optional<Object> getValue(LoadOptions option, CommandLine cli) {
+      if(!option.has(cli)) {
+        return Optional.empty();
+      }
+      File templateFile = new File(option.get(cli));
+      if(templateFile.exists()) {
+        List<String> templates = new ArrayList<>();
+        try(BufferedReader br = new BufferedReader(new FileReader(templateFile))) {
+          for(String line = null;(line = br.readLine()) != null;) {
+            templates.add(line);
+          }
+          return Optional.of(templates);
+        } catch (IOException e) {
+          throw new IllegalStateException("Unable to read template file: " + option.get(cli), e);
+        }
+      }
+      else {
+        throw new IllegalStateException("Unable to read template file: " + option.get(cli) + " file doesn't exist.");
+      }
+    }
+
+    @Override
+    public String getShortCode() {
+      return "t";
+    }
+  })
+  ,SUMMARY_LOOKBACK(new OptionHandler<LoadOptions>() {
+    @Nullable
+    @Override
+    public Option apply(@Nullable String s) {
+      Option o = new Option(s, "lookback", true, "When summarizing, how many monitoring periods should we summarize over?  If 0, then no summary.  Default: 5");
+      o.setArgName("LOOKBACK");
+      o.setRequired(false);
+      return o;
+    }
+
+    @Override
+    public Optional<Object> getValue(LoadOptions option, CommandLine cli) {
+      if(option.has(cli)) {
+        return Optional.of(ConversionUtils.convert(option.get(cli), Integer.class));
+      }
+      else {
+        return Optional.of(5);
+      }
+    }
+
+    @Override
+    public String getShortCode() {
+      return "l";
+    }
+  })
+  ,EPS(new OptionHandler<LoadOptions>() {
+    @Nullable
+    @Override
+    public Option apply(@Nullable String s) {
+      Option o = new Option(s, "eps", true, "The target events per second");
+      o.setArgName("EPS");
+      o.setRequired(false);
+      return o;
+    }
+
+    @Override
+    public Optional<Object> getValue(LoadOptions option, CommandLine cli) {
+      if(option.has(cli)) {
+        return Optional.of(ConversionUtils.convert(option.get(cli), Long.class));
+      }
+      else {
+        return Optional.empty();
+      }
+    }
+
+    @Override
+    public String getShortCode() {
+      return "e";
+    }
+  })
+  ,KAFKA_CONFIG(new OptionHandler<LoadOptions>() {
+    @Nullable
+    @Override
+    public Option apply(@Nullable String s) {
+      Option o = new Option(s, "kafka_config", true, "The kafka config.  This is a file containing a JSON map with the kafka config.");
+      o.setArgName("CONFIG_FILE");
+      o.setRequired(false);
+      return o;
+    }
+
+    @Override
+    public Optional<Object> getValue(LoadOptions option, CommandLine cli) {
+      if(!option.has(cli)) {
+        return Optional.empty();
+      }
+      File configFile = new File(option.get(cli));
+      if(configFile.exists()) {
+        try {
+          return Optional.ofNullable(JSONUtils.INSTANCE.load(configFile, JSONUtils.MAP_SUPPLIER));
+        } catch (FileNotFoundException e) {
+          throw new IllegalStateException("Unable to read file: " + option.get(cli), e);
+        } catch (IOException e) {
+          throw new IllegalStateException("Unable to read file: " + option.get(cli), e);
+        }
+      }
+      else {
+        throw new IllegalStateException("Unable to read file: " + option.get(cli) + " file doesn't exist.");
+      }
+    }
+
+    @Override
+    public String getShortCode() {
+      return "k";
+    }
+  }),
+  SEND_DELTA(new OptionHandler<LoadOptions>() {
+    @Nullable
+    @Override
+    public Option apply(@Nullable String s) {
+      Option o = new Option(s, "send_delta_ms", true, "The time (in ms) between sending a batch of messages. Default is " + LoadGenerator.SEND_PERIOD_MS);
+      o.setArgName("TIME_IN_MS");
+      o.setRequired(false);
+      return o;
+    }
+
+    @Override
+    public Optional<Object> getValue(LoadOptions option, CommandLine cli) {
+      if(option.has(cli)) {
+        Object res = option.get(cli);
+        return Optional.ofNullable(ConversionUtils.convert(res, Long.class));
+      }
+      return Optional.of(LoadGenerator.SEND_PERIOD_MS);
+
+    }
+
+    @Override
+    public String getShortCode() {
+      return "sd";
+    }
+  }),
+  MONITOR_DELTA(new OptionHandler<LoadOptions>() {
+    @Nullable
+    @Override
+    public Option apply(@Nullable String s) {
+      Option o = new Option(s, "monitor_delta_ms", true, "The time (in ms) between monitoring output. Default is " + LoadGenerator.MONITOR_PERIOD_MS);
+      o.setArgName("TIME_IN_MS");
+      o.setRequired(false);
+      return o;
+    }
+
+    @Override
+    public Optional<Object> getValue(LoadOptions option, CommandLine cli) {
+      if(option.has(cli)) {
+        Object res = option.get(cli);
+        return Optional.ofNullable(ConversionUtils.convert(res, Long.class));
+      }
+      return Optional.of(LoadGenerator.MONITOR_PERIOD_MS);
+
+    }
+
+    @Override
+    public String getShortCode() {
+      return "md";
+    }
+  })
+  ,TIME_LIMIT(new OptionHandler<LoadOptions>() {
+    @Nullable
+    @Override
+    public Option apply(@Nullable String s) {
+      Option o = new Option(s, "time_limit_ms", true, "The total amount of time to run this in milliseconds.  By default, it never stops.");
+      o.setArgName("MS");
+      o.setRequired(false);
+      return o;
+    }
+
+    @Override
+    public Optional<Object> getValue(LoadOptions option, CommandLine cli) {
+      if(option.has(cli)) {
+        Object res = option.get(cli);
+        Long timeMs = ConversionUtils.convert(res, Long.class);
+        return Optional.ofNullable(timeMs);
+      }
+      return Optional.empty();
+
+    }
+
+    @Override
+    public String getShortCode() {
+      return "tl";
+    }
+  })
+  ,NUM_THREADS(new OptionHandler<LoadOptions>() {
+    @Nullable
+    @Override
+    public Option apply(@Nullable String s) {
+      Option o = new Option(s, "threads", true, "The number of threads to use when extracting data.  The default is the number of cores of your machine.");
+      o.setArgName("NUM_THREADS");
+      o.setRequired(false);
+      return o;
+    }
+
+    @Override
+    public Optional<Object> getValue(LoadOptions option, CommandLine cli) {
+      int numThreads = Runtime.getRuntime().availableProcessors();
+      if(option.has(cli)) {
+        Object res = option.get(cli);
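+        // A trailing "C" means a multiple of the available cores, e.g. "2C" for two threads per core.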
+        if(res instanceof String && res.toString().toUpperCase().endsWith("C")) {
+          numThreads *= ConversionUtils.convert(res.toString().trim().replace("C", ""), Integer.class);
+        }
+        else {
+          numThreads = ConversionUtils.convert(res, Integer.class);
+        }
+      }
+      return Optional.of(numThreads);
+
+    }
+
+    @Override
+    public String getShortCode() {
+      return "p";
+    }
+  })
+  ,OUTPUT_TOPIC(new OptionHandler<LoadOptions>() {
+    @Nullable
+    @Override
+    public Option apply(@Nullable String s) {
+      Option o = new Option(s, "output_topic", true, "The kafka topic to write to");
+      o.setArgName("TOPIC");
+      o.setRequired(false);
+      return o;
+    }
+
+    @Override
+    public Optional<Object> getValue(LoadOptions option, CommandLine cli) {
+      return Optional.ofNullable(option.get(cli));
+    }
+
+    @Override
+    public String getShortCode() {
+      return "ot";
+    }
+  }),
+  MONITOR_TOPIC(new OptionHandler<LoadOptions>() {
+    @Nullable
+    @Override
+    public Option apply(@Nullable String s) {
+      Option o = new Option(s, "monitor_topic", true, "The kafka topic to monitor.");
+      o.setArgName("TOPIC");
+      o.setRequired(false);
+      return o;
+    }
+
+    @Override
+    public Optional<Object> getValue(LoadOptions option, CommandLine cli) {
+      return Optional.ofNullable(option.get(cli));
+    }
+
+    @Override
+    public String getShortCode() {
+      return "mt";
+    }
+  }),
+  ;
+  Option option;
+  String shortCode;
+  OptionHandler<LoadOptions> handler;
+  LoadOptions(OptionHandler<LoadOptions> optionHandler) {
+    this.shortCode = optionHandler.getShortCode();
+    this.handler = optionHandler;
+    this.option = optionHandler.apply(shortCode);
+  }
+
+  @Override
+  public Option getOption() {
+    return option;
+  }
+
+  public boolean has(CommandLine cli) {
+    return cli.hasOption(shortCode);
+  }
+
+  public String get(CommandLine cli) {
+    return cli.getOptionValue(shortCode);
+  }
+
+  @Override
+  public OptionHandler<LoadOptions> getHandler() {
+    return handler;
+  }
+
+  public static CommandLine parse(CommandLineParser parser, String[] args) {
+    try {
+      CommandLine cli = parser.parse(getOptions(), args);
+      if(HELP.has(cli)) {
+        printHelp();
+        System.exit(0);
+      }
+      return cli;
+    } catch (ParseException e) {
+      System.err.println("Unable to parse args: " + Joiner.on(' ').join(args));
+      e.printStackTrace(System.err);
+      printHelp();
+      System.exit(-1);
+      return null;
+    }
+  }
+
+  public static EnumMap<LoadOptions, Optional<Object> > createConfig(CommandLine cli) {
+    EnumMap<LoadOptions, Optional<Object> > ret = new EnumMap<>(LoadOptions.class);
+    for(LoadOptions option : values()) {
+      ret.put(option, option.handler.getValue(option, cli));
+    }
+    return ret;
+  }
+
+  public static void printHelp() {
+    HelpFormatter formatter = new HelpFormatter();
+    formatter.printHelp( "Generator", getOptions());
+  }
+
+  public static Options getOptions() {
+    Options ret = new Options();
+    for(LoadOptions o : LoadOptions.values()) {
+      ret.addOption(o.option);
+    }
+    return ret;
+  }
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/MessageGenerator.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/MessageGenerator.java b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/MessageGenerator.java
new file mode 100644
index 0000000..572d438
--- /dev/null
+++ b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/MessageGenerator.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.load;
+
+import org.apache.metron.performance.sampler.Sampler;
+
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Supplier;
+
+public class MessageGenerator implements Supplier<String> {
+  private static ThreadLocal<Random> rng = ThreadLocal.withInitial(() -> new Random());
+  private static AtomicLong guidOffset = new AtomicLong(0);
+  private static String guidPrefix = "00000000-0000-0000-0000-";
+  private List<String> patterns;
+  private Sampler sampler;
+  public MessageGenerator(List<String> patterns, Sampler sampler) {
+    this.patterns = patterns;
+    this.sampler = sampler;
+  }
+
+  @Override
+  public String get() {
+    int sample = sampler.sample(rng.get(), patterns.size());
+    String pattern = patterns.get(sample);
+    long guidId = guidOffset.getAndIncrement();
+    String guid = guidPrefix + guidId;
+    String ts = "" + System.currentTimeMillis();
+    return pattern.replace("$METRON_TS", ts)
+                            .replace("$METRON_GUID", guid);
+  }
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/SendToKafka.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/SendToKafka.java b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/SendToKafka.java
new file mode 100644
index 0000000..67bf469
--- /dev/null
+++ b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/SendToKafka.java
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.load;
+
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.TimerTask;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Supplier;
+
+public class SendToKafka extends TimerTask {
+  private long numMessagesSent;
+  private long numSentLast = 0;
+  private long batchSize;
+  private int numBatches;
+  private Supplier<String> messageSupplier;
+  private String kafkaTopic;
+  private ExecutorService pool;
+  protected AtomicLong numSent;
+  private ThreadLocal<KafkaProducer<String, String>> kafkaProducer;
+  public SendToKafka( String kafkaTopic
+                    , long numMessagesSent
+                    , int numBatches
+                    , Supplier<String> messageSupplier
+                    , ExecutorService pool
+                    , AtomicLong numSent
+                    , ThreadLocal<KafkaProducer<String, String>> kafkaProducer
+                    )
+  {
+    this.numSent = numSent;
+    this.kafkaProducer = kafkaProducer;
+    this.pool = pool;
+    this.numMessagesSent = numMessagesSent;
+    this.messageSupplier = messageSupplier;
+    this.numBatches = numBatches;
+    this.batchSize = numMessagesSent/numBatches;
+    this.kafkaTopic = kafkaTopic;
+  }
+
+  @Override
+  public void run() {
+    long numSentCurrent = numSent.get();
+    long numSentSince = numSentCurrent - numSentLast;
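+    // Only send the next wave once the previous period's sends have completed; this
+    // effectively applies backpressure when kafka cannot absorb the requested rate.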
+    boolean sendMessages = numSentLast == 0 || numSentSince >= numMessagesSent;
+    if(sendMessages) {
+      Collection<Future<Long>> futures = Collections.synchronizedList(new ArrayList<>());
+      for(int batch = 0;batch < numBatches;++batch) {
+        try {
+          futures.add(pool.submit(() -> {
+            KafkaProducer<String, String> producer = kafkaProducer.get();
+            Collection<Future<?>> b = Collections.synchronizedCollection(new ArrayList<>());
+            for (int i = 0; i < batchSize; ++i) {
+              b.add(sendToKafka(producer, kafkaTopic, messageSupplier.get()));
+            }
+            for(Future<?> f : b) {
+              f.get();
+            }
+            return batchSize;
+          }));
+
+        } catch (Exception e) {
+          e.printStackTrace(System.err);
+        }
+      }
+      for(Future<Long> f : futures) {
+        try {
+          f.get();
+        } catch (Exception e) {
+          e.printStackTrace(System.err);
+        }
+      }
+      numSentLast = numSentCurrent;
+    }
+  }
+
+  protected Future<?> sendToKafka(KafkaProducer<String, String> producer, String kafkaTopic, String message) {
+    return producer.send(new ProducerRecord<>(kafkaTopic, message),
+                      (recordMetadata, e) -> {
+                        if(e != null) {
+                          e.printStackTrace(System.err);
+                        }
+                        numSent.incrementAndGet();
+                      }
+              );
+  }
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/AbstractMonitor.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/AbstractMonitor.java b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/AbstractMonitor.java
new file mode 100644
index 0000000..80cb5cc
--- /dev/null
+++ b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/AbstractMonitor.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.load.monitor;
+
+import java.util.Optional;
+import java.util.function.Supplier;
+
+public abstract class AbstractMonitor implements Supplier<Long>, MonitorNaming {
+  private static final double EPSILON = 1e-6;
+  protected Optional<?> kafkaTopic;
+  protected long timestampPrevious = 0;
+  public AbstractMonitor(Optional<?> kafkaTopic) {
+    this.kafkaTopic = kafkaTopic;
+  }
+
+  protected abstract Long monitor(double deltaTs);
+
+  @Override
+  public Long get() {
+    long timeStarted = System.currentTimeMillis();
+    Long ret = null;
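+    // The first call only establishes a baseline timestamp; subsequent calls hand the
+    // elapsed wall-clock seconds to the concrete monitor so it can compute a rate.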
+    if(timestampPrevious > 0) {
+      double deltaTs = (timeStarted - timestampPrevious) / 1000.0;
+      if (Math.abs(deltaTs) > EPSILON) {
+        ret = monitor(deltaTs);
+      }
+    }
+    timestampPrevious = timeStarted;
+    return ret;
+  }
+
+  public abstract String format();
+
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/EPSGeneratedMonitor.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/EPSGeneratedMonitor.java b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/EPSGeneratedMonitor.java
new file mode 100644
index 0000000..3e380bb
--- /dev/null
+++ b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/EPSGeneratedMonitor.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.load.monitor;
+
+import java.util.Optional;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class EPSGeneratedMonitor extends AbstractMonitor {
+  private AtomicLong numSent;
+  private long numSentPrevious = 0;
+  public EPSGeneratedMonitor(Optional<?> kafkaTopic, AtomicLong numSent) {
+    super(kafkaTopic);
+    this.numSent = numSent;
+  }
+
+  @Override
+  protected Long monitor(double deltaTs) {
+    if(kafkaTopic.isPresent()) {
+      long totalProcessed = numSent.get();
+      long written = (totalProcessed - numSentPrevious);
+      long epsWritten = (long) (written / deltaTs);
+      numSentPrevious = totalProcessed;
+      return epsWritten;
+    }
+    return null;
+  }
+
+  @Override
+  public String format() {
+    return "%d eps generated to " + kafkaTopic.get();
+  }
+
+  @Override
+  public String name() {
+    return "generated";
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/EPSThroughputWrittenMonitor.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/EPSThroughputWrittenMonitor.java b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/EPSThroughputWrittenMonitor.java
new file mode 100644
index 0000000..96efd1d
--- /dev/null
+++ b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/EPSThroughputWrittenMonitor.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.load.monitor;
+
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.metron.performance.util.KafkaUtil;
+
+import java.util.Map;
+import java.util.Optional;
+
+public class EPSThroughputWrittenMonitor extends AbstractMonitor {
+  Map<Integer, Long> lastOffsetMap = null;
+  KafkaConsumer<String, String> consumer;
+  public EPSThroughputWrittenMonitor(Optional<?> kafkaTopic, Map<String, Object> kafkaProps) {
+    super(kafkaTopic);
+    consumer = new KafkaConsumer<>(kafkaProps);
+  }
+
+  private Long writtenSince(Map<Integer, Long> partitionOffsets, Map<Integer, Long> lastOffsetMap) {
+    if(partitionOffsets == null) {
+      return null;
+    }
+    long sum = 0;
+    for(Map.Entry<Integer, Long> partitionOffset : partitionOffsets.entrySet()) {
+      sum += partitionOffset.getValue() - lastOffsetMap.get(partitionOffset.getKey());
+    }
+    return sum;
+  }
+
+  @Override
+  protected Long monitor(double deltaTs) {
+    Optional<Long> epsWritten = Optional.empty();
+    if(kafkaTopic.isPresent()) {
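+      // Throughput is inferred from the growth of the per-partition topic offsets since
+      // the last sample, divided by the elapsed seconds.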
+      if(lastOffsetMap != null) {
+        Map<Integer, Long> currentOffsets = KafkaUtil.INSTANCE.getKafkaOffsetMap(consumer, (String) kafkaTopic.get());
+        Long eventsWrittenSince = writtenSince(currentOffsets, lastOffsetMap);
+        if (eventsWrittenSince != null) {
+          epsWritten = Optional.of((long) (eventsWrittenSince / deltaTs));
+        }
+        lastOffsetMap = currentOffsets == null ? lastOffsetMap : currentOffsets;
+        if (epsWritten.isPresent()) {
+          return epsWritten.get();
+        }
+      }
+      else {
+        lastOffsetMap = KafkaUtil.INSTANCE.getKafkaOffsetMap(consumer, (String)kafkaTopic.get());
+      }
+    }
+    return null;
+  }
+
+  @Override
+  public String format() {
+    return "%d eps throughput measured for " + kafkaTopic.get();
+  }
+
+  @Override
+  public String name() {
+    return "throughput measured";
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/MonitorNaming.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/MonitorNaming.java b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/MonitorNaming.java
new file mode 100644
index 0000000..4833c17
--- /dev/null
+++ b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/MonitorNaming.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.load.monitor;
+
+public interface MonitorNaming {
+  String format();
+  String name();
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/MonitorTask.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/MonitorTask.java b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/MonitorTask.java
new file mode 100644
index 0000000..1e02a00
--- /dev/null
+++ b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/MonitorTask.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.load.monitor;
+
+import org.apache.metron.performance.load.monitor.writers.Writer;
+
+import java.util.TimerTask;
+
+public class MonitorTask extends TimerTask {
+  private Writer writer;
+  public MonitorTask(Writer writer) {
+    this.writer = writer;
+  }
+
+  /**
+   * The action to be performed by this timer task.
+   */
+  @Override
+  public void run() {
+    writer.writeAll();
+  }
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/Results.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/Results.java b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/Results.java
new file mode 100644
index 0000000..e094b74
--- /dev/null
+++ b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/Results.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.load.monitor;
+
+import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
+
+import java.util.Optional;
+
+public class Results {
+  private String format;
+  private String name;
+  private Optional<DescriptiveStatistics> history;
+  private Long eps;
+  public Results(String format, String name, Long eps, Optional<DescriptiveStatistics> history) {
+    this.format = format;
+    this.name = name;
+    this.history = history;
+    this.eps = eps;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public Long getEps() {
+    return eps;
+  }
+
+  public String getFormat() {
+    return format;
+  }
+
+  public Optional<DescriptiveStatistics> getHistory() {
+    return history;
+  }
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/writers/CSVWriter.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/writers/CSVWriter.java b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/writers/CSVWriter.java
new file mode 100644
index 0000000..112206d
--- /dev/null
+++ b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/writers/CSVWriter.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.load.monitor.writers;
+
+import com.google.common.base.Joiner;
+import org.apache.metron.performance.load.monitor.Results;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+import java.util.function.Consumer;
+
+public class CSVWriter implements Consumer<Writable> {
+  private Optional<PrintWriter> pw = Optional.empty();
+
+  public CSVWriter(File outFile) throws IOException {
+    if(outFile != null) {
+      pw = Optional.of(new PrintWriter(new FileWriter(outFile)));
+    }
+  }
+
+  @Override
+  public void accept(Writable writable) {
+    if(pw.isPresent()) {
+      List<String> parts = new ArrayList<>();
+      parts.add("" + writable.getDate().getTime());
+      for (Results r : writable.getResults()) {
+        parts.add(r.getName());
+        parts.add(r.getEps() == null?"":(r.getEps() + ""));
+        if (r.getHistory().isPresent()) {
+          parts.add("" + (int) r.getHistory().get().getMean());
+          parts.add("" + (int) Math.sqrt(r.getHistory().get().getVariance()));
+        } else {
+          parts.add("");
+          parts.add("");
+        }
+      }
+      pw.get().println(Joiner.on(",").join(parts));
+      pw.get().flush();
+    }
+  }
+
+  public void close() {
+    if(pw.isPresent()) {
+      pw.get().close();
+    }
+  }
+}
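
A hedged usage sketch (file path and values hypothetical): each accept()
emits one CSV row of the form <epochMillis>,<name>,<eps>,<mean>,<stddev>
per result, with the last two columns left empty when no history is present.

import java.io.File;
import java.util.Collections;
import java.util.Date;
import java.util.Optional;

import org.apache.metron.performance.load.monitor.Results;
import org.apache.metron.performance.load.monitor.writers.CSVWriter;
import org.apache.metron.performance.load.monitor.writers.Writable;

public class CSVWriterExample {
  public static void main(String[] args) throws Exception {
    CSVWriter csv = new CSVWriter(new File("/tmp/monitor.csv"));
    // One result with no history: the row ends with two empty columns.
    csv.accept(new Writable(new Date(),
        Collections.singletonList(new Results("%d eps", "kafka", 1000L, Optional.empty()))));
    csv.close();
  }
}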

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/writers/ConsoleWriter.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/writers/ConsoleWriter.java b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/writers/ConsoleWriter.java
new file mode 100644
index 0000000..efb2ad3
--- /dev/null
+++ b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/writers/ConsoleWriter.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.load.monitor.writers;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
+import org.apache.metron.performance.load.monitor.Results;
+
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.function.Consumer;
+
+public class ConsoleWriter implements Consumer<Writable> {
+
+  private String getSummary(DescriptiveStatistics stats) {
+    return String.format("Mean: %d, Std Dev: %d", (int)stats.getMean(), (int)Math.sqrt(stats.getVariance()));
+  }
+
+  @Override
+  public void accept(Writable writable) {
+    List<String> parts = new ArrayList<>();
+    Date date = writable.getDate();
+    for(Results r : writable.getResults()) {
+      Long eps = r.getEps();
+      if(eps != null) {
+        String part = String.format(r.getFormat(), eps);
+        if (r.getHistory().isPresent()) {
+          part += " (" + getSummary(r.getHistory().get()) + ")";
+        }
+        parts.add(part);
+      }
+    }
+    if(date != null) {
+      DateFormat dateFormat = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss");
+      String header = dateFormat.format(date) + " - ";
+      String emptyHeader = StringUtils.repeat(" ", header.length());
+      for (int i = 0; i < parts.size(); ++i) {
+        String part = parts.get(i);
+        if (i == 0) {
+          System.out.println(header + (part == null ? "" : part));
+        } else {
+          System.out.println(emptyHeader + (part == null ? "" : part));
+        }
+      }
+    }
+  }
+}
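
For illustration only, hypothetical output from this writer for two results,
the first carrying a summary history: the date header is printed once and
subsequent lines are padded to align.

2018/04/25 09:27:18 - Throughput: 1000 eps (Mean: 980, Std Dev: 25)
                      Written: 995 eps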

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/writers/Writable.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/writers/Writable.java b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/writers/Writable.java
new file mode 100644
index 0000000..3ed62bf
--- /dev/null
+++ b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/writers/Writable.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.load.monitor.writers;
+
+import org.apache.metron.performance.load.monitor.Results;
+
+import java.util.Date;
+import java.util.List;
+
+public class Writable {
+  private Date date;
+  private List<Results> results;
+  public Writable(Date date, List<Results> results) {
+    this.date = date;
+    this.results = results;
+  }
+
+  public Date getDate() {
+    return date;
+  }
+
+  public List<Results> getResults() {
+    return results;
+  }
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/writers/Writer.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/writers/Writer.java b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/writers/Writer.java
new file mode 100644
index 0000000..a9d915b
--- /dev/null
+++ b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/load/monitor/writers/Writer.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.load.monitor.writers;
+
+import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
+import org.apache.metron.performance.load.monitor.AbstractMonitor;
+import org.apache.metron.performance.load.monitor.Results;
+
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Optional;
+import java.util.function.Consumer;
+
+public class Writer {
+
+  private int summaryLookback;
+  private List<LinkedList<Double>> summaries = new ArrayList<>();
+  private List<Consumer<Writable>> writers;
+  private List<AbstractMonitor> monitors;
+
+  public Writer(List<AbstractMonitor> monitors, int summaryLookback, List<Consumer<Writable>> writers) {
+    this.summaryLookback = summaryLookback;
+    this.writers = writers;
+    this.monitors = monitors;
+    for(AbstractMonitor m : monitors) {
+      this.summaries.add(new LinkedList<>());
+    }
+  }
+
+  public void writeAll() {
+    int i = 0;
+    Date dateOf = new Date();
+    List<Results> results = new ArrayList<>();
+    for(AbstractMonitor m : monitors) {
+      Long eps = m.get();
+      if(eps != null && summaryLookback > 0) {
+          LinkedList<Double> summary = summaries.get(i);
+          addToLookback(eps.doubleValue(), summary);
+          results.add(new Results(m.format(), m.name(), eps, Optional.of(getStats(summary))));
+      }
+      else {
+        results.add(new Results(m.format(), m.name(), eps, Optional.empty()));
+      }
+      i++;
+    }
+    Writable writable = new Writable(dateOf, results);
+    for(Consumer<Writable> writer : writers) {
+      writer.accept(writable);
+    }
+  }
+
+  private void addToLookback(Double d, LinkedList<Double> lookback) {
+    if(lookback.size() >= summaryLookback) {
+      lookback.removeFirst();
+    }
+    lookback.addLast(d);
+  }
+
+  public DescriptiveStatistics getStats(List<Double> avg) {
+    DescriptiveStatistics stats = new DescriptiveStatistics();
+    for(Double d : avg) {
+      if(d == null || Double.isNaN(d)) {
+        continue;
+      }
+      stats.addValue(d);
+    }
+    return stats;
+  }
+}
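
A small sketch (hypothetical values) of the summary window: getStats() skips
nulls and NaNs, so a lookback of [10.0, NaN, 20.0] yields a mean of 15.

import java.util.Arrays;
import java.util.Collections;

import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
import org.apache.metron.performance.load.monitor.writers.Writer;

public class LookbackExample {
  public static void main(String[] args) {
    // No monitors or consumers are needed to exercise the stats helper.
    Writer writer = new Writer(Collections.emptyList(), 3, Collections.emptyList());
    DescriptiveStatistics stats = writer.getStats(Arrays.asList(10.0, Double.NaN, 20.0));
    System.out.println((int) stats.getMean());  // prints 15
  }
}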

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/sampler/BiasedSampler.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/sampler/BiasedSampler.java b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/sampler/BiasedSampler.java
new file mode 100644
index 0000000..f0a5b2c
--- /dev/null
+++ b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/sampler/BiasedSampler.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.sampler;
+
+import com.google.common.base.Splitter;
+import com.google.common.collect.Iterables;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.util.AbstractMap;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.TreeMap;
+
+public class BiasedSampler implements Sampler {
+  TreeMap<Double, Map.Entry<Integer, Integer>> discreteDistribution;
+  public BiasedSampler(List<Map.Entry<Integer, Integer>>  discreteDistribution, int max) {
+    this.discreteDistribution = createDistribution(discreteDistribution, max);
+  }
+
+  public static List<Map.Entry<Integer, Integer>> readDistribution(BufferedReader distrFile) throws IOException {
+    return readDistribution(distrFile, false);
+  }
+
+  public static List<Map.Entry<Integer, Integer>> readDistribution(BufferedReader distrFile, boolean quiet) throws IOException {
+    List<Map.Entry<Integer, Integer>> ret = new ArrayList<>();
+    if(!quiet) {
+      System.out.println("Using biased sampler with the following biases:");
+    }
+    int sumLeft = 0;
+    int sumRight = 0;
+    for(String line = null;(line = distrFile.readLine()) != null;) {
+      if(line.startsWith("#")) {
+        continue;
+      }
+      Iterable<String> it = Splitter.on(",").split(line.trim());
+      if(Iterables.size(it) != 2) {
+        throw new IllegalArgumentException(line + " should be a comma-separated pair of integers, but was not.");
+      }
+      int left = Integer.parseInt(Iterables.getFirst(it, null));
+      int right = Integer.parseInt(Iterables.getLast(it, null));
+      if(left <= 0 || left > 100) {
+        throw new IllegalArgumentException(line + ": " + left + " must be a positive integer in (0, 100]");
+      }
+      if(right <= 0 || right > 100) {
+        throw new IllegalArgumentException(line + ": " + right + " must be a positive integer in (0, 100]");
+      }
+      if(!quiet) {
+        System.out.println("\t" + left + "% of templates will comprise roughly " + right + "% of sample output");
+      }
+      ret.add(new AbstractMap.SimpleEntry<>(left, right));
+      sumLeft += left;
+      sumRight += right;
+    }
+    if(sumLeft > 100 || sumRight > 100 ) {
+      throw new IllegalStateException("Neither column may sum to more than 100.  " +
+              "The first column is the % of templates. " +
+              "The second column is the % of the sample that % of templates occupies.");
+    }
+    else if(sumLeft < 100 && sumRight < 100) {
+      int left = 100 - sumLeft;
+      int right = 100 - sumRight;
+      if(!quiet) {
+        System.out.println("\t" + left + "% of templates will comprise roughly " + right + "% of sample output");
+      }
+      ret.add(new AbstractMap.SimpleEntry<>(left, right));
+    }
+    return ret;
+
+  }
+
+  private static TreeMap<Double, Map.Entry<Integer, Integer>>
+                 createDistribution(List<Map.Entry<Integer, Integer>>  discreteDistribution, int max) {
+    TreeMap<Double, Map.Entry<Integer, Integer>> ret = new TreeMap<>();
+    int from = 0;
+    double weight = 0.0d;
+    for(Map.Entry<Integer, Integer> kv : discreteDistribution) {
+      double pctVals = kv.getKey()/100.0;
+      int to = from + (int)(max*pctVals);
+      double pctWeight = kv.getValue()/100.0;
+      ret.put(weight, new AbstractMap.SimpleEntry<>(from, to));
+      weight += pctWeight;
+      from = to;
+    }
+    return ret;
+  }
+
+  @Override
+  public int sample(Random rng, int limit) {
+    double weight = rng.nextDouble();
+    Map.Entry<Integer, Integer> range = discreteDistribution.floorEntry(weight).getValue();
+    return rng.nextInt(range.getValue() - range.getKey()) + range.getKey();
+  }
+}
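
A hedged end-to-end sketch (bias values hypothetical): 20% of templates are
declared to produce roughly 80% of the output, and readDistribution fills in
the remaining 80%-of-templates/20%-of-output bucket automatically.

import java.io.BufferedReader;
import java.io.StringReader;
import java.util.List;
import java.util.Map;
import java.util.Random;

import org.apache.metron.performance.sampler.BiasedSampler;

public class BiasedSamplerExample {
  public static void main(String[] args) throws Exception {
    String bias = "# pct_templates,pct_output\n20,80\n";
    List<Map.Entry<Integer, Integer>> distribution =
        BiasedSampler.readDistribution(new BufferedReader(new StringReader(bias)), true);
    // Draw biased indices into a pool of 100 templates; indices in [0, 20)
    // should appear roughly 80% of the time.
    BiasedSampler sampler = new BiasedSampler(distribution, 100);
    System.out.println(sampler.sample(new Random(0), 100));
  }
}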

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/sampler/Sampler.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/sampler/Sampler.java b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/sampler/Sampler.java
new file mode 100644
index 0000000..e5f03c8
--- /dev/null
+++ b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/sampler/Sampler.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.sampler;
+
+import java.util.Random;
+
+public interface Sampler {
+  int sample(Random rng, int limit);
+}
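
For contrast, a minimal unbiased implementation of this interface (a sketch,
not part of the patch) would simply sample uniformly in [0, limit):

import java.util.Random;

import org.apache.metron.performance.sampler.Sampler;

public class UniformSampler implements Sampler {
  @Override
  public int sample(Random rng, int limit) {
    // Every template index is equally likely.
    return rng.nextInt(limit);
  }
}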


[35/50] [abbrv] metron git commit: METRON-1347: Indexing Topology should fail tuples without a source.type (cstella via mmiklavc) closes apache/metron#863

Posted by rm...@apache.org.
METRON-1347: Indexing Topology should fail tuples without a source.type (cstella via mmiklavc) closes apache/metron#863


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/bfe90ef1
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/bfe90ef1
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/bfe90ef1

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: bfe90ef1e579be53a14d9fd0e4dc19fc6a81baf0
Parents: 53124d9
Author: cstella <ce...@gmail.com>
Authored: Fri Apr 13 11:17:00 2018 -0600
Committer: Michael Miklavcic <mi...@gmail.com>
Committed: Fri Apr 13 11:17:00 2018 -0600

----------------------------------------------------------------------
 .../bolt/BulkMessageWriterBoltTest.java         | 25 ++++++++++
 metron-platform/metron-indexing/README.md       |  6 +++
 .../writer/bolt/BulkMessageWriterBolt.java      | 51 ++++++++++++++------
 3 files changed, 68 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/bfe90ef1/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/bolt/BulkMessageWriterBoltTest.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/bolt/BulkMessageWriterBoltTest.java b/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/bolt/BulkMessageWriterBoltTest.java
index 308638e..dedf5e6 100644
--- a/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/bolt/BulkMessageWriterBoltTest.java
+++ b/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/bolt/BulkMessageWriterBoltTest.java
@@ -118,6 +118,31 @@ public class BulkMessageWriterBoltTest extends BaseEnrichmentBoltTest {
   private MessageGetStrategy messageGetStrategy;
 
   @Test
+  public void testSensorTypeMissing() throws Exception {
+    BulkMessageWriterBolt bulkMessageWriterBolt = new BulkMessageWriterBolt("zookeeperUrl")
+            .withBulkMessageWriter(bulkMessageWriter).withMessageGetter(MessageGetters.JSON_FROM_FIELD.name())
+            .withMessageGetterField("message");
+    bulkMessageWriterBolt.setCuratorFramework(client);
+    bulkMessageWriterBolt.setZKCache(cache);
+    bulkMessageWriterBolt.getConfigurations().updateSensorIndexingConfig(sensorType,
+            new FileInputStream(sampleSensorIndexingConfigPath));
+
+    bulkMessageWriterBolt.declareOutputFields(declarer);
+    verify(declarer, times(1)).declareStream(eq("error"), argThat(
+            new FieldsMatcher("message")));
+    Map stormConf = new HashMap();
+    bulkMessageWriterBolt.prepare(stormConf, topologyContext, outputCollector);
+    BulkWriterComponent<JSONObject> component = mock(BulkWriterComponent.class);
+    bulkMessageWriterBolt.setWriterComponent(component);
+    verify(bulkMessageWriter, times(1)).init(eq(stormConf),any(TopologyContext.class), any(WriterConfiguration.class));
+    JSONObject message = (JSONObject) new JSONParser().parse(sampleMessageString);
+    message.remove("source.type");
+    when(tuple.getValueByField("message")).thenReturn(message);
+    bulkMessageWriterBolt.execute(tuple);
+    verify(component, times(1)).error(eq("null"), any(), any(), any());
+  }
+
+  @Test
   public void testFlushOnBatchSize() throws Exception {
     BulkMessageWriterBolt bulkMessageWriterBolt = new BulkMessageWriterBolt("zookeeperUrl")
             .withBulkMessageWriter(bulkMessageWriter).withMessageGetter(MessageGetters.JSON_FROM_FIELD.name())

http://git-wip-us.apache.org/repos/asf/metron/blob/bfe90ef1/metron-platform/metron-indexing/README.md
----------------------------------------------------------------------
diff --git a/metron-platform/metron-indexing/README.md b/metron-platform/metron-indexing/README.md
index d351d7c..f4a4501 100644
--- a/metron-platform/metron-indexing/README.md
+++ b/metron-platform/metron-indexing/README.md
@@ -32,6 +32,12 @@ Indices are written in batch and the batch size and batch timeout are specified
 [Sensor Indexing Configuration](#sensor-indexing-configuration) via the `batchSize` and `batchTimeout` parameters.
 These configs are variable by sensor type.
 
+## Minimal Assumptions for Message Structure
+
+At minimum, a message must have a `source.type` field.
+Without this field, the tuple will be failed and the message will not be
+written, with an appropriate error indicated in the Storm UI and logs.
+
 ## Indexing Architecture
 
 ![Architecture](indexing_arch.png)
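
For reference, a hedged sketch (field values hypothetical) of a minimally
viable message: only `source.type` is required for the indexing topology to
route it, as the change above enforces.

import org.json.simple.JSONObject;

public class MinimalMessageExample {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    JSONObject message = new JSONObject();
    // The one required field; all others are optional for indexing.
    message.put("source.type", "bro");
    message.put("timestamp", 1524857381000L);
    System.out.println(message.toJSONString());
  }
}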

http://git-wip-us.apache.org/repos/asf/metron/blob/bfe90ef1/metron-platform/metron-writer/src/main/java/org/apache/metron/writer/bolt/BulkMessageWriterBolt.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-writer/src/main/java/org/apache/metron/writer/bolt/BulkMessageWriterBolt.java b/metron-platform/metron-writer/src/main/java/org/apache/metron/writer/bolt/BulkMessageWriterBolt.java
index 8202604..b5b97d8 100644
--- a/metron-platform/metron-writer/src/main/java/org/apache/metron/writer/bolt/BulkMessageWriterBolt.java
+++ b/metron-platform/metron-writer/src/main/java/org/apache/metron/writer/bolt/BulkMessageWriterBolt.java
@@ -17,6 +17,7 @@
  */
 package org.apache.metron.writer.bolt;
 
+import com.google.common.collect.ImmutableList;
 import org.apache.metron.common.Constants;
 import org.apache.metron.common.bolt.ConfiguredIndexingBolt;
 import org.apache.metron.common.configuration.writer.IndexingWriterConfiguration;
@@ -125,6 +126,13 @@ public class BulkMessageWriterBolt extends ConfiguredIndexingBolt {
     return defaultBatchTimeout;
   }
 
+  public BulkWriterComponent<JSONObject> getWriterComponent() {
+    return writerComponent;
+  }
+
+  public void setWriterComponent(BulkWriterComponent<JSONObject> component) {
+    writerComponent = component;
+  }
   /**
    * This method is called by TopologyBuilder.createTopology() to obtain topology and
    * bolt specific configuration parameters.  We use it primarily to configure how often
@@ -160,9 +168,11 @@ public class BulkMessageWriterBolt extends ConfiguredIndexingBolt {
     return conf;
   }
 
+
+
   @Override
   public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
-    this.writerComponent = new BulkWriterComponent<>(collector);
+    setWriterComponent(new BulkWriterComponent<>(collector));
     this.collector = collector;
     super.prepare(stormConf, context, collector);
     if (messageGetField != null) {
@@ -185,7 +195,7 @@ public class BulkMessageWriterBolt extends ConfiguredIndexingBolt {
         BatchTimeoutHelper timeoutHelper = new BatchTimeoutHelper(writerconf::getAllConfiguredTimeouts, batchTimeoutDivisor);
         defaultBatchTimeout = timeoutHelper.getDefaultBatchTimeout();
       }
-      writerComponent.setDefaultBatchTimeout(defaultBatchTimeout);
+      getWriterComponent().setDefaultBatchTimeout(defaultBatchTimeout);
       bulkMessageWriter.init(stormConf, context, writerconf);
     } catch (Exception e) {
       throw new RuntimeException(e);
@@ -197,7 +207,7 @@ public class BulkMessageWriterBolt extends ConfiguredIndexingBolt {
    */
   public void prepare(Map stormConf, TopologyContext context, OutputCollector collector, Clock clock) {
     prepare(stormConf, context, collector);
-    writerComponent.withClock(clock);
+    getWriterComponent().withClock(clock);
   }
 
   @SuppressWarnings("unchecked")
@@ -208,7 +218,7 @@ public class BulkMessageWriterBolt extends ConfiguredIndexingBolt {
         if (!(bulkMessageWriter instanceof WriterToBulkWriter)) {
           //WriterToBulkWriter doesn't allow batching, so no need to flush on Tick.
           LOG.debug("Flushing message queues older than their batchTimeouts");
-          writerComponent.flushTimeouts(bulkMessageWriter, configurationTransformation.apply(
+          getWriterComponent().flushTimeouts(bulkMessageWriter, configurationTransformation.apply(
                   new IndexingWriterConfiguration(bulkMessageWriter.getName(), getConfigurations()))
                   , messageGetStrategy);
         }
@@ -229,17 +239,30 @@ public class BulkMessageWriterBolt extends ConfiguredIndexingBolt {
       LOG.trace("Writing enrichment message: {}", message);
       WriterConfiguration writerConfiguration = configurationTransformation.apply(
               new IndexingWriterConfiguration(bulkMessageWriter.getName(), getConfigurations()));
-      if(writerConfiguration.isDefault(sensorType)) {
-        //want to warn, but not fail the tuple
-        collector.reportError(new Exception("WARNING: Default and (likely) unoptimized writer config used for " + bulkMessageWriter.getName() + " writer and sensor " + sensorType));
+      if(sensorType == null) {
+        //sensor type somehow ended up being null.  We want to error this message directly.
+        getWriterComponent().error("null"
+                             , new Exception("Sensor type is not specified for message "
+                                            + message.toJSONString()
+                                            )
+                             , ImmutableList.of(tuple)
+                             , messageGetStrategy
+                             );
+      }
+      else {
+        if (writerConfiguration.isDefault(sensorType)) {
+          //want to warn, but not fail the tuple
+          collector.reportError(new Exception("WARNING: Default and (likely) unoptimized writer config used for " + bulkMessageWriter.getName() + " writer and sensor " + sensorType));
+        }
+
+        getWriterComponent().write(sensorType
+                , tuple
+                , message
+                , bulkMessageWriter
+                , writerConfiguration
+                , messageGetStrategy
+        );
       }
-      writerComponent.write(sensorType
-                           , tuple
-                           , message
-                           , bulkMessageWriter
-                           , writerConfiguration
-                           , messageGetStrategy
-                           );
     }
     catch(Exception e) {
       throw new RuntimeException("This should have been caught in the writerComponent.  If you see this, file a JIRA", e);


[46/50] [abbrv] metron git commit: METRON-1529 CONFIG_GET Fails to Retrieve Latest Config When Run in Zeppelin REPL (nickwallen) closes apache/metron#997

Posted by rm...@apache.org.
METRON-1529 CONFIG_GET Fails to Retrieve Latest Config When Run in Zeppelin REPL (nickwallen) closes apache/metron#997


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/37e3fd32
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/37e3fd32
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/37e3fd32

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 37e3fd32c256ddc129eb7c1363d78e9095a39748
Parents: b5bf9a9
Author: nickwallen <ni...@nickallen.org>
Authored: Wed Apr 25 09:27:18 2018 -0400
Committer: nickallen <ni...@apache.org>
Committed: Wed Apr 25 09:27:18 2018 -0400

----------------------------------------------------------------------
 .../configuration/ConfigurationsUtils.java      | 123 +++-
 .../management/ConfigurationFunctions.java      | 564 ++++++++++---------
 .../management/ConfigurationFunctionsTest.java  | 424 ++++++++++----
 .../shell/DefaultStellarShellExecutor.java      |   4 +-
 .../common/utils/StellarProcessorUtils.java     | 135 +++--
 5 files changed, 825 insertions(+), 425 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/37e3fd32/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/ConfigurationsUtils.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/ConfigurationsUtils.java b/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/ConfigurationsUtils.java
index a89db63..c7b39f0 100644
--- a/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/ConfigurationsUtils.java
+++ b/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/ConfigurationsUtils.java
@@ -27,6 +27,7 @@ import com.fasterxml.jackson.databind.JsonNode;
 import java.io.ByteArrayInputStream;
 import java.io.File;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.PrintStream;
 import java.lang.invoke.MethodHandles;
 import java.nio.file.Files;
@@ -45,6 +46,7 @@ import org.apache.curator.framework.CuratorFrameworkFactory;
 import org.apache.curator.retry.ExponentialBackoffRetry;
 import org.apache.metron.common.Constants;
 import org.apache.metron.common.configuration.enrichment.SensorEnrichmentConfig;
+import org.apache.metron.common.configuration.profiler.ProfilerConfig;
 import org.apache.metron.common.utils.JSONUtils;
 import org.apache.metron.stellar.dsl.Context;
 import org.apache.metron.stellar.dsl.StellarFunctions;
@@ -235,12 +237,99 @@ public class ConfigurationsUtils {
                               );
   }
 
+  /**
+   * Reads the global configuration stored in Zookeeper.
+   *
+   * @param client The Zookeeper client.
+   * @return The global configuration, if one exists.  Otherwise, null.
+   * @throws Exception
+   */
+  public static Map<String, Object> readGlobalConfigFromZookeeper(CuratorFramework client) throws Exception {
+    Map<String, Object> config = null;
+
+    Optional<byte[]> bytes = readFromZookeeperSafely(GLOBAL.getZookeeperRoot(), client);
+    if(bytes.isPresent()) {
+      InputStream in = new ByteArrayInputStream(bytes.get());
+      config = JSONUtils.INSTANCE.load(in, JSONUtils.MAP_SUPPLIER);
+    }
+
+    return config;
+  }
+
+  /**
+   * Reads the Indexing configuration from Zookeeper.
+   *
+   * @param sensorType The type of sensor.
+   * @param client The Zookeeper client.
+   * @return The indexing configuration for the given sensor type, if one exists.  Otherwise, null.
+   * @throws Exception
+   */
+  public static Map<String, Object> readSensorIndexingConfigFromZookeeper(String sensorType, CuratorFramework client) throws Exception {
+    Map<String, Object> config = null;
+
+    Optional<byte[]> bytes = readFromZookeeperSafely(INDEXING.getZookeeperRoot() + "/" + sensorType, client);
+    if(bytes.isPresent()) {
+      InputStream in = new ByteArrayInputStream(bytes.get());
+      config = JSONUtils.INSTANCE.load(in, JSONUtils.MAP_SUPPLIER);
+    }
+
+    return config;
+  }
+
+  /**
+   * Reads the Enrichment configuration from Zookeeper.
+   *
+   * @param sensorType The type of sensor.
+   * @param client The Zookeeper client.
+   * @return The Enrichment configuration for the given sensor type, if one exists. Otherwise, null.
+   * @throws Exception
+   */
   public static SensorEnrichmentConfig readSensorEnrichmentConfigFromZookeeper(String sensorType, CuratorFramework client) throws Exception {
-    return JSONUtils.INSTANCE.load(new ByteArrayInputStream(readFromZookeeper(ENRICHMENT.getZookeeperRoot() + "/" + sensorType, client)), SensorEnrichmentConfig.class);
+    SensorEnrichmentConfig config = null;
+
+    Optional<byte[]> bytes = readFromZookeeperSafely(ENRICHMENT.getZookeeperRoot() + "/" + sensorType, client);
+    if (bytes.isPresent()) {
+      config = SensorEnrichmentConfig.fromBytes(bytes.get());
+    }
+
+    return config;
   }
 
+  /**
+   * Reads the Parser configuration from Zookeeper.
+   *
+   * @param sensorType The type of sensor.
+   * @param client The Zookeeper client.
+   * @return The Parser configuration for the given sensor type, if one exists. Otherwise, null.
+   * @throws Exception
+   */
   public static SensorParserConfig readSensorParserConfigFromZookeeper(String sensorType, CuratorFramework client) throws Exception {
-    return JSONUtils.INSTANCE.load(new ByteArrayInputStream(readFromZookeeper(PARSER.getZookeeperRoot() + "/" + sensorType, client)), SensorParserConfig.class);
+    SensorParserConfig config = null;
+
+    Optional<byte[]> bytes = readFromZookeeperSafely(PARSER.getZookeeperRoot() + "/" + sensorType, client);
+    if(bytes.isPresent()) {
+      config = SensorParserConfig.fromBytes(bytes.get());
+    }
+
+    return config;
+  }
+
+  /**
+   * Reads the Profiler configuration from Zookeeper.
+   *
+   * @param client The Zookeeper client.
+   * @return The Profiler configuration, if one exists.  Otherwise, null.
+   * @throws Exception
+   */
+  public static ProfilerConfig readProfilerConfigFromZookeeper(CuratorFramework client) throws Exception {
+    ProfilerConfig config = null;
+
+    Optional<byte[]> bytes = readFromZookeeperSafely(PROFILER.getZookeeperRoot(), client);
+    if(bytes.isPresent()) {
+      config = ProfilerConfig.fromBytes(bytes.get());
+    }
+
+    return config;
   }
 
   public static byte[] readGlobalConfigBytesFromZookeeper(CuratorFramework client) throws Exception {
@@ -289,6 +378,36 @@ public class ConfigurationsUtils {
     }
   }
 
+  /**
+   * Read raw bytes from Zookeeper.
+   *
+   * @param path The path to the Zookeeper node to read.
+   * @param client The Zookeeper client.
+   * @return The bytes read from Zookeeper, if the node exists.  Otherwise, Optional.empty().
+   * @throws Exception
+   */
+  public static Optional<byte[]> readFromZookeeperSafely(String path, CuratorFramework client) throws Exception {
+    Optional<byte[]> result = Optional.empty();
+
+    try {
+      byte[] bytes = readFromZookeeper(path, client);
+      result = Optional.of(bytes);
+
+    } catch(KeeperException.NoNodeException e) {
+      LOG.debug("Zookeeper node missing; path={}", path, e);
+    }
+
+    return result;
+  }
+
+  /**
+   * Read raw bytes from Zookeeper.
+   *
+   * @param path The path to the Zookeeper node to read.
+   * @param client The Zookeeper client.
+   * @return The bytes read from Zookeeper.
+   * @throws Exception If the path does not exist in Zookeeper.
+   */
   public static byte[] readFromZookeeper(String path, CuratorFramework client) throws Exception {
     if (client != null && client.getData() != null && path != null) {
       return client.getData().forPath(path);
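
A hedged usage sketch of the safe read (quorum address and path are
hypothetical): a missing node now yields Optional.empty() rather than
propagating a NoNodeException.

import java.util.Optional;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;

import static org.apache.metron.common.configuration.ConfigurationsUtils.readFromZookeeperSafely;

public class SafeReadExample {
  public static void main(String[] args) throws Exception {
    CuratorFramework client = CuratorFrameworkFactory.newClient(
        "node1:2181", new ExponentialBackoffRetry(1000, 3));
    client.start();
    Optional<byte[]> bytes = readFromZookeeperSafely("/metron/topology/global", client);
    System.out.println(bytes.map(String::new).orElse("<no config>"));
    client.close();
  }
}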

http://git-wip-us.apache.org/repos/asf/metron/blob/37e3fd32/metron-platform/metron-management/src/main/java/org/apache/metron/management/ConfigurationFunctions.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-management/src/main/java/org/apache/metron/management/ConfigurationFunctions.java b/metron-platform/metron-management/src/main/java/org/apache/metron/management/ConfigurationFunctions.java
index af90e14..5a1281c 100644
--- a/metron-platform/metron-management/src/main/java/org/apache/metron/management/ConfigurationFunctions.java
+++ b/metron-platform/metron-management/src/main/java/org/apache/metron/management/ConfigurationFunctions.java
@@ -18,26 +18,17 @@
 package org.apache.metron.management;
 
 import com.fasterxml.jackson.core.JsonProcessingException;
-import com.google.common.base.Splitter;
-import com.google.common.collect.Iterables;
-import java.lang.invoke.MethodHandles;
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
 import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.recipes.cache.TreeCache;
-import org.apache.curator.framework.recipes.cache.TreeCacheEvent;
-import org.apache.curator.framework.recipes.cache.TreeCacheListener;
-import org.apache.metron.common.Constants;
 import org.apache.metron.common.configuration.ConfigurationType;
-import org.apache.metron.common.configuration.ConfigurationsUtils;
+import org.apache.metron.common.configuration.EnrichmentConfigurations;
 import org.apache.metron.common.configuration.IndexingConfigurations;
+import org.apache.metron.common.configuration.ParserConfigurations;
 import org.apache.metron.common.configuration.SensorParserConfig;
 import org.apache.metron.common.configuration.enrichment.SensorEnrichmentConfig;
+import org.apache.metron.common.configuration.profiler.ProfilerConfig;
+import org.apache.metron.common.configuration.profiler.ProfilerConfigurations;
 import org.apache.metron.common.utils.JSONUtils;
+import org.apache.metron.common.zookeeper.ZKConfigurationsCache;
 import org.apache.metron.stellar.common.utils.ConversionUtils;
 import org.apache.metron.stellar.dsl.Context;
 import org.apache.metron.stellar.dsl.ParseException;
@@ -46,203 +37,280 @@ import org.apache.metron.stellar.dsl.StellarFunction;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.ByteArrayInputStream;
+import java.lang.invoke.MethodHandles;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+
+import static java.lang.String.format;
+import static org.apache.metron.common.configuration.ConfigurationType.ENRICHMENT;
+import static org.apache.metron.common.configuration.ConfigurationType.GLOBAL;
+import static org.apache.metron.common.configuration.ConfigurationType.INDEXING;
+import static org.apache.metron.common.configuration.ConfigurationType.PARSER;
+import static org.apache.metron.common.configuration.ConfigurationType.PROFILER;
+import static org.apache.metron.common.configuration.ConfigurationsUtils.readGlobalConfigBytesFromZookeeper;
+import static org.apache.metron.common.configuration.ConfigurationsUtils.readGlobalConfigFromZookeeper;
+import static org.apache.metron.common.configuration.ConfigurationsUtils.readProfilerConfigBytesFromZookeeper;
+import static org.apache.metron.common.configuration.ConfigurationsUtils.readProfilerConfigFromZookeeper;
+import static org.apache.metron.common.configuration.ConfigurationsUtils.readSensorEnrichmentConfigFromZookeeper;
+import static org.apache.metron.common.configuration.ConfigurationsUtils.readSensorIndexingConfigBytesFromZookeeper;
+import static org.apache.metron.common.configuration.ConfigurationsUtils.readSensorIndexingConfigFromZookeeper;
+import static org.apache.metron.common.configuration.ConfigurationsUtils.readSensorParserConfigFromZookeeper;
+import static org.apache.metron.common.configuration.ConfigurationsUtils.writeGlobalConfigToZookeeper;
+import static org.apache.metron.common.configuration.ConfigurationsUtils.writeProfilerConfigToZookeeper;
+import static org.apache.metron.common.configuration.ConfigurationsUtils.writeSensorEnrichmentConfigToZookeeper;
+import static org.apache.metron.common.configuration.ConfigurationsUtils.writeSensorIndexingConfigToZookeeper;
+import static org.apache.metron.common.configuration.ConfigurationsUtils.writeSensorParserConfigToZookeeper;
+
+/**
+ * Defines functions that enable modification of Metron configuration values.
+ */
 public class ConfigurationFunctions {
+
   private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  private static EnumMap<ConfigurationType, Object> configMap = new EnumMap<ConfigurationType, Object>(ConfigurationType.class) {{
-    for(ConfigurationType ct : ConfigurationType.values()) {
-      put(ct, Collections.synchronizedMap(new HashMap<String, String>()));
-    }
-    put(ConfigurationType.GLOBAL, "");
-    put(ConfigurationType.PROFILER, "");
-  }};
-  private static synchronized void setupTreeCache(Context context) throws Exception {
-    try {
-      Optional<Object> treeCacheOpt = context.getCapability("treeCache");
-      if (treeCacheOpt.isPresent()) {
-        return;
-      }
+
+
+  /**
+   * Retrieves the Zookeeper client from the execution context.
+   *
+   * @param context The execution context.
+   * @return A Zookeeper client, if one exists.  Otherwise, an exception is thrown.
+   */
+  private static CuratorFramework getZookeeperClient(Context context) {
+
+    Optional<Object> clientOpt = context.getCapability(Context.Capabilities.ZOOKEEPER_CLIENT, true);
+    if(clientOpt.isPresent()) {
+      return (CuratorFramework) clientOpt.get();
+
+    } else {
+      throw new IllegalStateException("Missing ZOOKEEPER_CLIENT; zookeeper connection required");
     }
-    catch(IllegalStateException ex) {
+  }
 
+  /**
+   * Get an argument from a list of arguments.
+   *
+   * @param index The index within the list of arguments.
+   * @param clazz The type expected.
+   * @param args All of the arguments.
+   * @param <T> The type of the argument expected.
+   */
+  public static <T> T getArg(int index, Class<T> clazz, List<Object> args) {
+
+    if(index >= args.size()) {
+      throw new IllegalArgumentException(format("expected at least %d argument(s), found %d", index+1, args.size()));
     }
-    Optional<Object> clientOpt = context.getCapability(Context.Capabilities.ZOOKEEPER_CLIENT);
-    if(!clientOpt.isPresent()) {
-      throw new IllegalStateException("I expected a zookeeper client to exist and it did not.  Please connect to zookeeper.");
+
+    return ConversionUtils.convert(args.get(index), clazz);
+  }
+
+  /**
+   * Serializes a configuration object to the raw JSON.
+   *
+   * @param object The configuration object to serialize
+   * @return
+   */
+  private static String toJSON(Object object) {
+
+    if(object == null) {
+      return null;
     }
-    CuratorFramework client = (CuratorFramework) clientOpt.get();
-    TreeCache cache = new TreeCache(client, Constants.ZOOKEEPER_TOPOLOGY_ROOT);
-    TreeCacheListener listener = new TreeCacheListener() {
-      @Override
-      public void childEvent(CuratorFramework client, TreeCacheEvent event) throws Exception {
-        if (event.getType().equals(TreeCacheEvent.Type.NODE_ADDED) || event.getType().equals(TreeCacheEvent.Type.NODE_UPDATED)) {
-          String path = event.getData().getPath();
-          byte[] data = event.getData().getData();
-          String sensor = Iterables.getLast(Splitter.on("/").split(path), null);
-          if (path.startsWith(ConfigurationType.PARSER.getZookeeperRoot())) {
-            Map<String, String> sensorMap = (Map<String, String>)configMap.get(ConfigurationType.PARSER);
-            sensorMap.put(sensor, new String(data));
-          } else if (ConfigurationType.GLOBAL.getZookeeperRoot().equals(path)) {
-            configMap.put(ConfigurationType.GLOBAL, new String(data));
-          } else if (ConfigurationType.PROFILER.getZookeeperRoot().equals(path)) {
-            configMap.put(ConfigurationType.PROFILER, new String(data));
-          } else if (path.startsWith(ConfigurationType.ENRICHMENT.getZookeeperRoot())) {
-            Map<String, String> sensorMap = (Map<String, String>)configMap.get(ConfigurationType.ENRICHMENT);
-            sensorMap.put(sensor, new String(data));
-          } else if (path.startsWith(ConfigurationType.INDEXING.getZookeeperRoot())) {
-            Map<String, String> sensorMap = (Map<String, String>)configMap.get(ConfigurationType.INDEXING);
-            sensorMap.put(sensor, new String(data));
-          }
-        }
-        else if(event.getType().equals(TreeCacheEvent.Type.NODE_REMOVED)) {
-          String path = event.getData().getPath();
-          String sensor = Iterables.getLast(Splitter.on("/").split(path), null);
-          if (path.startsWith(ConfigurationType.PARSER.getZookeeperRoot())) {
-            Map<String, String> sensorMap = (Map<String, String>)configMap.get(ConfigurationType.PARSER);
-            sensorMap.remove(sensor);
-          }
-          else if (path.startsWith(ConfigurationType.ENRICHMENT.getZookeeperRoot())) {
-            Map<String, String> sensorMap = (Map<String, String>)configMap.get(ConfigurationType.ENRICHMENT);
-            sensorMap.remove(sensor);
-          }
-          else if (path.startsWith(ConfigurationType.INDEXING.getZookeeperRoot())) {
-            Map<String, String> sensorMap = (Map<String, String>)configMap.get(ConfigurationType.INDEXING);
-            sensorMap.remove(sensor);
-          }
-          else if (ConfigurationType.PROFILER.getZookeeperRoot().equals(path)) {
-            configMap.put(ConfigurationType.PROFILER, null);
-          }
-          else if (ConfigurationType.GLOBAL.getZookeeperRoot().equals(path)) {
-            configMap.put(ConfigurationType.GLOBAL, null);
-          }
-        }
-      }
-    };
-    cache.getListenable().addListener(listener);
-    cache.start();
-    for(ConfigurationType ct : ConfigurationType.values()) {
-      switch(ct) {
-        case GLOBAL:
-        case PROFILER:
-          {
-            String data = "";
-            try {
-              byte[] bytes = ConfigurationsUtils.readFromZookeeper(ct.getZookeeperRoot(), client);
-              data = new String(bytes);
-            }
-            catch(Exception ex) {
-
-            }
-            configMap.put(ct, data);
-          }
-          break;
-        case INDEXING:
-        case ENRICHMENT:
-        case PARSER:
-          {
-            List<String> sensorTypes = client.getChildren().forPath(ct.getZookeeperRoot());
-            Map<String, String> sensorMap = (Map<String, String>)configMap.get(ct);
-            for(String sensorType : sensorTypes) {
-              sensorMap.put(sensorType, new String(ConfigurationsUtils.readFromZookeeper(ct.getZookeeperRoot() + "/" + sensorType, client)));
-            }
-          }
-          break;
-      }
+
+    try {
+      return JSONUtils.INSTANCE.toJSON(object, true);
+
+    } catch (JsonProcessingException e) {
+      throw new RuntimeException(e);
     }
-    context.addCapability("treeCache", () -> cache);
   }
 
   @Stellar(
-           namespace = "CONFIG"
-          ,name = "GET"
-          ,description = "Retrieve a Metron configuration from zookeeper."
-          ,params = {"type - One of ENRICHMENT, INDEXING, PARSER, GLOBAL, PROFILER"
-                    , "sensor - Sensor to retrieve (required for enrichment and parser, not used for profiler and global)"
-                    , "emptyIfNotPresent - If true, then return an empty, minimally viable config"
-                    }
-          ,returns = "The String representation of the config in zookeeper"
-          )
+          namespace = "CONFIG",
+          name = "GET",
+          description = "Retrieve a Metron configuration from zookeeper.",
+          params = {
+                  "type - One of ENRICHMENT, INDEXING, PARSER, GLOBAL, PROFILER",
+                  "sensor - Sensor to retrieve (required for enrichment and parser, not used for profiler and global)",
+                  "emptyIfNotPresent - If true, then return an empty, minimally viable config"
+          },
+          returns = "The String representation of the config in zookeeper")
   public static class ConfigGet implements StellarFunction {
-    boolean initialized = false;
+
+    /**
+     * Whether the function has been initialized.
+     */
+    private boolean initialized = false;
+
+    /**
+     * The Zookeeper client.
+     */
+    private CuratorFramework zkClient;
+
     @Override
     public Object apply(List<Object> args, Context context) throws ParseException {
-      ConfigurationType type = ConfigurationType.valueOf((String)args.get(0));
-      boolean emptyIfNotPresent = true;
+      String result;
 
-      switch(type) {
-        case GLOBAL:
-        case PROFILER:
-          return configMap.get(type);
-        case PARSER: {
-          String sensor = (String) args.get(1);
-          if(args.size() > 2) {
-            emptyIfNotPresent = ConversionUtils.convert(args.get(2), Boolean.class);
-          }
-          Map<String, String> sensorMap = (Map<String, String>) configMap.get(type);
-          String ret = sensorMap.get(sensor);
-          if (ret == null && emptyIfNotPresent ) {
-            SensorParserConfig config = new SensorParserConfig();
-            config.setSensorTopic(sensor);
-            try {
-              ret = JSONUtils.INSTANCE.toJSON(config, true);
-            } catch (JsonProcessingException e) {
-              LOG.error("Unable to serialize default object: {}", e.getMessage(), e);
-              throw new ParseException("Unable to serialize default object: " + e.getMessage(), e);
-            }
-          }
-          return ret;
-        }
-        case INDEXING: {
-          String sensor = (String) args.get(1);
-          if(args.size() > 2) {
-            emptyIfNotPresent = ConversionUtils.convert(args.get(2), Boolean.class);
-          }
-          Map<String, String> sensorMap = (Map<String, String>) configMap.get(type);
-          String ret = sensorMap.get(sensor);
-          if (ret == null && emptyIfNotPresent ) {
-            Map<String, Object> config = new HashMap<>();
-            try {
-              ret = JSONUtils.INSTANCE.toJSON(config, true);
-              IndexingConfigurations.setIndex(config, sensor);
-            } catch (JsonProcessingException e) {
-              LOG.error("Unable to serialize default object: {}", e.getMessage(), e);
-              throw new ParseException("Unable to serialize default object: " + e.getMessage(), e);
-            }
-          }
-          return ret;
-        }
-        case ENRICHMENT: {
-          String sensor = (String) args.get(1);
-          if(args.size() > 2) {
-            emptyIfNotPresent = ConversionUtils.convert(args.get(2), Boolean.class);
-          }
-          Map<String, String> sensorMap = (Map<String, String>) configMap.get(type);
-          String ret = sensorMap.get(sensor);
-          if (ret == null && emptyIfNotPresent ) {
-            SensorEnrichmentConfig config = new SensorEnrichmentConfig();
-            try {
-              ret = JSONUtils.INSTANCE.toJSON(config, true);
-            } catch (JsonProcessingException e) {
-              LOG.error("Unable to serialize default object: {}", e.getMessage(), e);
-              throw new ParseException("Unable to serialize default object: " + e.getMessage(), e);
-            }
-          }
-          return ret;
+      // the configuration type to write
+      String arg0 = getArg(0, String.class, args);
+      ConfigurationType type = ConfigurationType.valueOf(arg0);
+
+      try {
+
+        if (GLOBAL == type) {
+          result = getGlobalConfig(args);
+
+        } else if (PROFILER == type) {
+          result = getProfilerConfig(args);
+
+        } else if (ENRICHMENT == type) {
+          result = getEnrichmentConfig(args);
+
+        } else if (INDEXING == type) {
+          result = getIndexingConfig(args);
+
+        } else if (PARSER == type) {
+          result = getParserConfig(args);
+
+        } else {
+          throw new IllegalArgumentException("Unexpected configuration type: " + type);
         }
-        default:
-          throw new UnsupportedOperationException("Unable to support type " + type);
+
+      } catch(Exception e) {
+        throw new RuntimeException(e);
       }
+
+      return result;
     }
 
-    @Override
-    public void initialize(Context context) {
-      try {
-        setupTreeCache(context);
-      } catch (Exception e) {
-        LOG.error("Unable to initialize: {}", e.getMessage(), e);
+    /**
+     * Retrieves the Global configuration.
+     *
+     * @return The Global configuration.
+     * @throws Exception
+     */
+    private String getGlobalConfig(List<Object> args) throws Exception {
+
+      Map<String, Object> globals = readGlobalConfigFromZookeeper(zkClient);
+
+      // provide empty/default config if one is not present?
+      if(globals == null && emptyIfNotPresent(args)) {
+        globals = new HashMap<>();
       }
-      finally {
-        initialized = true;
+
+      return toJSON(globals);
+    }
+
+    /**
+     * Retrieves the Parser configuration.
+     *
+     * @param args The function arguments.
+     * @return The Parser configuration.
+     * @throws Exception
+     */
+    private String getParserConfig(List<Object> args) throws Exception {
+
+      // retrieve the parser config for the given sensor
+      String sensor = getArg(1, String.class, args);
+      SensorParserConfig sensorConfig = readSensorParserConfigFromZookeeper(sensor, zkClient);
+
+      // provide empty/default config if one is not present?
+      if(sensorConfig == null && emptyIfNotPresent(args)) {
+        sensorConfig = new SensorParserConfig();
       }
+
+      return toJSON(sensorConfig);
+    }
+
+    /**
+     * Retrieve the Enrichment configuration.
+     *
+     * @param args The function arguments.
+     * @return The Enrichment configuration as a JSON string.
+     * @throws Exception
+     */
+    private String getEnrichmentConfig(List<Object> args) throws Exception {
+
+      // retrieve the enrichment config for the given sensor
+      String sensor = getArg(1, String.class, args);
+      SensorEnrichmentConfig sensorConfig = readSensorEnrichmentConfigFromZookeeper(sensor, zkClient);
+
+      // provide empty/default config if one is not present?
+      if(sensorConfig == null && emptyIfNotPresent(args)) {
+        sensorConfig = new SensorEnrichmentConfig();
+      }
+
+      return toJSON(sensorConfig);
+    }
+
+    /**
+     * Retrieve the Indexing configuration.
+     *
+     * @param args The function arguments.
+     * @return The Indexing configuration as a JSON string.
+     * @throws Exception
+     */
+    private String getIndexingConfig(List<Object> args) throws Exception {
+
+      // retrieve the indexing config for the given sensor
+      String sensor = getArg(1, String.class, args);
+      Map<String, Object> sensorConfig = readSensorIndexingConfigFromZookeeper(sensor, zkClient);
+
+      // provide empty/default config if one is not present?
+      if(sensorConfig == null && emptyIfNotPresent(args)) {
+        sensorConfig = Collections.emptyMap();
+      }
+
+      return toJSON(sensorConfig);
+    }
+
+    /**
+     * Retrieve the Profiler configuration.
+     *
+     * @param args The function arguments.
+     * @return The Profiler configuration as a JSON string.
+     * @throws Exception
+     */
+    private String getProfilerConfig(List<Object> args) throws Exception {
+
+      ProfilerConfig profilerConfig = readProfilerConfigFromZookeeper(zkClient);
+
+      // provide empty/default config if one is not present?
+      if(profilerConfig == null && emptyIfNotPresent(args)) {
+        profilerConfig = new ProfilerConfig();
+      }
+
+      return toJSON(profilerConfig);
+    }
+
+    /**
+     * Retrieves the 'emptyIfNotPresent' argument.
+     *
+     * <p>This determines whether a default configuration should be returned when no
+     * configuration is present.  This defaults to true.
+     *
+     * @param args The function arguments.
+     * @return The 'emptyIfNotPresent' argument.
+     * @throws Exception
+     */
+    private boolean emptyIfNotPresent(List<Object> args) {
+
+      boolean emptyIfNotPresent = true;
+      int lastIndex = args.size() - 1;
+
+      // expect 'emptyIfNotPresent' to always be the last boolean arg
+      if(args.size() >= 2 && args.get(lastIndex) instanceof Boolean) {
+        emptyIfNotPresent = getArg(lastIndex, Boolean.class, args);
+      }
+
+      return emptyIfNotPresent;
+    }
+
+    @Override
+    public void initialize(Context context) {
+      zkClient = getZookeeperClient(context);
     }
 
     @Override
@@ -250,91 +318,69 @@ public class ConfigurationFunctions {
       return initialized;
     }
   }
+
   @Stellar(
-           namespace = "CONFIG"
-          ,name = "PUT"
-          ,description = "Updates a Metron config to Zookeeper."
-          ,params = {"type - One of ENRICHMENT, INDEXING, PARSER, GLOBAL, PROFILER"
-                    ,"config - The config (a string in JSON form) to update"
-                    , "sensor - Sensor to retrieve (required for enrichment and parser, not used for profiler and global)"
-                    }
-          ,returns = "The String representation of the config in zookeeper"
-          )
+          namespace = "CONFIG",
+          name = "PUT",
+          description = "Updates a Metron config to Zookeeper.",
+          params = {
+                  "type - One of ENRICHMENT, INDEXING, PARSER, GLOBAL, PROFILER",
+                  "config - The config (a string in JSON form) to update",
+                  "sensor - Sensor to retrieve (required for enrichment and parser, not used for profiler and global)"
+          },
+          returns = "The String representation of the config in zookeeper")
   public static class ConfigPut implements StellarFunction {
-    private CuratorFramework client;
-    private boolean initialized = false;
 
     @Override
     public Object apply(List<Object> args, Context context) throws ParseException {
-      ConfigurationType type = ConfigurationType.valueOf((String)args.get(0));
-      String config = (String)args.get(1);
-      if(config == null) {
-        return null;
-      }
-      try {
-        switch (type) {
-          case GLOBAL:
-            ConfigurationsUtils.writeGlobalConfigToZookeeper(config.getBytes(), client);
-            break;
-          case PROFILER:
-            ConfigurationsUtils.writeProfilerConfigToZookeeper(config.getBytes(), client);
-            break;
-          case ENRICHMENT:
-          {
-            String sensor = (String) args.get(2);
-            if(sensor == null) {
-              return null;
-            }
-            ConfigurationsUtils.writeSensorEnrichmentConfigToZookeeper(sensor, config.getBytes(), client);
-          }
-          break;
-          case INDEXING:
-          {
-            String sensor = (String) args.get(2);
-            if(sensor == null) {
-              return null;
-            }
-            ConfigurationsUtils.writeSensorIndexingConfigToZookeeper(sensor, config.getBytes(), client);
-          }
-          break;
-          case PARSER:
-            {
-            String sensor = (String) args.get(2);
-              if(sensor == null) {
-              return null;
-            }
-            ConfigurationsUtils.writeSensorParserConfigToZookeeper(sensor, config.getBytes(), client);
+
+      // the configuration type to write
+      String arg0 = getArg(0, String.class, args);
+      ConfigurationType type = ConfigurationType.valueOf(arg0);
+
+      // the configuration value to write
+      String value = getArg(1, String.class, args);
+      if(value != null) {
+
+        CuratorFramework client = getZookeeperClient(context);
+        try {
+
+          if(GLOBAL == type) {
+            writeGlobalConfigToZookeeper(value.getBytes(), client);
+
+          } else if(PROFILER == type) {
+            writeProfilerConfigToZookeeper(value.getBytes(), client);
+
+          } else if(ENRICHMENT == type) {
+            String sensor = getArg(2, String.class, args);
+            writeSensorEnrichmentConfigToZookeeper(sensor, value.getBytes(), client);
+
+          } else if(INDEXING == type) {
+            String sensor = getArg(2, String.class, args);
+            writeSensorIndexingConfigToZookeeper(sensor, value.getBytes(), client);
+
+          } else if (PARSER == type) {
+            String sensor = getArg(2, String.class, args);
+            writeSensorParserConfigToZookeeper(sensor, value.getBytes(), client);
           }
-          break;
+
+        } catch(Exception e) {
+          LOG.error("Unexpected exception: {}", e.getMessage(), e);
+          throw new ParseException(e.getMessage());
         }
       }
-      catch(Exception ex) {
-        LOG.error("Unable to put config: {}", ex.getMessage(), ex);
-        throw new ParseException("Unable to put config: " + ex.getMessage(), ex);
-      }
+
       return null;
     }
 
     @Override
     public void initialize(Context context) {
-      Optional<Object> clientOpt = context.getCapability(Context.Capabilities.ZOOKEEPER_CLIENT);
-      if(!clientOpt.isPresent()) {
-        throw new IllegalStateException("I expected a zookeeper client to exist and it did not.  Please connect to zookeeper.");
-      }
-      client = (CuratorFramework) clientOpt.get();
-      try {
-        setupTreeCache(context);
-      } catch (Exception e) {
-        LOG.error("Unable to initialize: {}", e.getMessage(), e);
-      }
-      finally {
-        initialized = true;
-      }
+      // nothing to do
     }
 
     @Override
     public boolean isInitialized() {
-      return initialized;
+      return true;
     }
   }
 }
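
For orientation, here is a minimal sketch of how the refactored CONFIG_GET and CONFIG_PUT functions are exercised, mirroring the tests below; it assumes a running Zookeeper and a started CuratorFramework named 'client', and the statements would live inside a JUnit test method:

```
import com.google.common.collect.ImmutableMap;
import org.apache.metron.stellar.dsl.Context;
import static org.apache.metron.stellar.common.utils.StellarProcessorUtils.run;

Context context = new Context.Builder()
        .with(Context.Capabilities.ZOOKEEPER_CLIENT, () -> client)
        .build();

// read the parser config for the 'bro' sensor as a JSON string
String parserConfig = (String) run("CONFIG_GET('PARSER', 'bro')", context);

// a missing config returns null when the trailing emptyIfNotPresent flag is false
Object missing = run("CONFIG_GET('PARSER', 'unknown-sensor', false)", context);

// push a config back to Zookeeper
run("CONFIG_PUT('PARSER', config, 'bro')", ImmutableMap.of("config", parserConfig), context);
```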

http://git-wip-us.apache.org/repos/asf/metron/blob/37e3fd32/metron-platform/metron-management/src/test/java/org/apache/metron/management/ConfigurationFunctionsTest.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-management/src/test/java/org/apache/metron/management/ConfigurationFunctionsTest.java b/metron-platform/metron-management/src/test/java/org/apache/metron/management/ConfigurationFunctionsTest.java
index 1920031..67e2a9d 100644
--- a/metron-platform/metron-management/src/test/java/org/apache/metron/management/ConfigurationFunctionsTest.java
+++ b/metron-platform/metron-management/src/test/java/org/apache/metron/management/ConfigurationFunctionsTest.java
@@ -19,194 +19,393 @@ package org.apache.metron.management;
 
 import com.google.common.collect.ImmutableMap;
 import org.adrianwalker.multilinestring.Multiline;
+import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.PosixParser;
 import org.apache.curator.framework.CuratorFramework;
 import org.apache.curator.test.TestingServer;
 import org.apache.log4j.Level;
 import org.apache.metron.common.cli.ConfigurationManager;
 import org.apache.metron.common.configuration.ConfigurationsUtils;
+import org.apache.metron.common.configuration.SensorParserConfig;
+import org.apache.metron.common.configuration.enrichment.SensorEnrichmentConfig;
+import org.apache.metron.common.configuration.profiler.ProfilerConfig;
 import org.apache.metron.stellar.dsl.Context;
 import org.apache.metron.stellar.dsl.ParseException;
 import org.apache.metron.test.utils.UnitTestHelper;
-import org.json.simple.parser.JSONParser;
 import org.json.simple.JSONObject;
-import org.junit.Assert;
+import org.json.simple.parser.JSONParser;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import java.util.HashMap;
+import java.util.Collections;
+import java.util.Map;
 
 import static org.apache.metron.TestConstants.PARSER_CONFIGS_PATH;
 import static org.apache.metron.TestConstants.SAMPLE_CONFIG_PATH;
+import static org.apache.metron.common.configuration.ConfigurationType.GLOBAL;
+import static org.apache.metron.common.configuration.ConfigurationType.PROFILER;
+import static org.apache.metron.common.configuration.ConfigurationsUtils.writeProfilerConfigToZookeeper;
 import static org.apache.metron.management.utils.FileUtils.slurp;
 import static org.apache.metron.stellar.common.utils.StellarProcessorUtils.run;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
+/**
+ * Tests the ConfigurationFunctions class.
+ */
 public class ConfigurationFunctionsTest {
+
   private static TestingServer testZkServer;
-  private static CuratorFramework client;
   private static String zookeeperUrl;
-  private Context context = new Context.Builder()
-            .with(Context.Capabilities.ZOOKEEPER_CLIENT, () -> client)
-            .build();
+  private static CuratorFramework client;
+  private static String goodGlobalConfig = slurp(SAMPLE_CONFIG_PATH + "/global.json");
+  private static String goodTestEnrichmentConfig = slurp(SAMPLE_CONFIG_PATH + "/enrichments/test.json");
+  private static String goodBroParserConfig = slurp(PARSER_CONFIGS_PATH + "/parsers/bro.json");
+  private static String goodTestIndexingConfig = slurp(SAMPLE_CONFIG_PATH + "/indexing/test.json");
+
+  private Context context;
+  private JSONParser parser;
+
+  /**
+   * {
+   *   "profiles" : [
+   *      {
+   *        "profile" : "counter",
+   *        "foreach" : "ip_src_addr",
+   *        "init"    : { "counter" : 0 },
+   *        "update"  : { "counter" : "counter + 1" },
+   *        "result"  : "counter"
+   *      }
+   *   ],
+   *   "timestampField" : "timestamp"
+   * }
+   */
+  @Multiline
+  private static String goodProfilerConfig;
+
   @BeforeClass
-  public static void setup() throws Exception {
+  public static void setupZookeeper() throws Exception {
+
+    // zookeeper server
     testZkServer = new TestingServer(true);
     zookeeperUrl = testZkServer.getConnectString();
+
+    // zookeeper client
     client = ConfigurationsUtils.getClient(zookeeperUrl);
     client.start();
+  }
 
-    pushConfigs(SAMPLE_CONFIG_PATH);
-    pushConfigs(PARSER_CONFIGS_PATH);
+  @Before
+  public void setup() throws Exception {
 
+    context = new Context.Builder()
+            .with(Context.Capabilities.ZOOKEEPER_CLIENT, () -> client)
+            .build();
+
+    parser = new JSONParser();
 
+    // push configs to zookeeper
+    pushConfigs(SAMPLE_CONFIG_PATH, zookeeperUrl);
+    pushConfigs(PARSER_CONFIGS_PATH, zookeeperUrl);
+    writeProfilerConfigToZookeeper(goodProfilerConfig.getBytes(), client);
   }
 
-  private static void pushConfigs(String inputPath) throws Exception {
-    String[] args = new String[]{
-            "-z", zookeeperUrl
-            , "--mode", "PUSH"
-            , "--input_dir", inputPath
-    };
-    ConfigurationManager manager = new ConfigurationManager();
-    manager.run(ConfigurationManager.ConfigurationOptions.parse(new PosixParser(), args));
+  /**
+   * Deletes a path within Zookeeper.
+   *
+   * @param path The path within Zookeeper to delete.
+   * @throws Exception
+   */
+  private void deletePath(String path) throws Exception {
+    client.delete().forPath(path);
   }
 
+  /**
+   * Transforms a String to a {@link JSONObject}.
+   *
+   * @param input The input String to transform
+   * @return A {@link JSONObject}.
+   * @throws org.json.simple.parser.ParseException
+   */
+  private JSONObject toJSONObject(String input) throws org.json.simple.parser.ParseException {
 
-  static String goodBroParserConfig = slurp(PARSER_CONFIGS_PATH + "/parsers/bro.json");
+    if(input == null) {
+      return null;
+    }
+    return (JSONObject) parser.parse(input.trim());
+  }
 
   /**
-    {
-      "sensorTopic" : "brop",
-      "parserConfig" : { },
-      "fieldTransformations" : [ ],
-      "readMetadata":false,
-      "mergeMetadata":false,
-      "parserParallelism" : 1,
-      "errorWriterParallelism" : 1,
-      "spoutNumTasks" : 1,
-      "stormConfig" : {},
-      "errorWriterNumTasks":1,
-      "spoutConfig":{},
-      "parserNumTasks":1,
-      "spoutParallelism":1
-    }
+   * Push configuration values to Zookeeper.
+   *
+   * @param inputPath The local filesystem path to the configurations.
+   * @param zookeeperUrl The URL of Zookeeper.
+   * @throws Exception
    */
-  @Multiline
-  static String defaultBropParserConfig;
+  private static void pushConfigs(String inputPath, String zookeeperUrl) throws Exception {
+
+    String[] args = new String[] {
+            "-z", zookeeperUrl,
+            "--mode", "PUSH",
+            "--input_dir", inputPath
+    };
+    CommandLine cli = ConfigurationManager.ConfigurationOptions.parse(new PosixParser(), args);
 
+    ConfigurationManager manager = new ConfigurationManager();
+    manager.run(cli);
+  }
 
+  /**
+   * The CONFIG_GET function should be able to return the Parser configuration
+   * for a given sensor.
+   */
   @Test
-  public void testParserGetHappyPath() {
+  public void testGetParser() throws Exception {
+
+    String out = (String) run("CONFIG_GET('PARSER', 'bro')", context);
 
-    Object out = run("CONFIG_GET('PARSER', 'bro')", new HashMap<>(), context);
-    Assert.assertEquals(goodBroParserConfig, out);
+    SensorParserConfig actual = SensorParserConfig.fromBytes(out.getBytes());
+    SensorParserConfig expected = SensorParserConfig.fromBytes(goodBroParserConfig.getBytes());
+    assertEquals(expected, actual);
   }
 
+  /**
+   * The CONFIG_GET function should NOT return any configuration when the
+   * Parser configuration for a given sensor is missing AND emptyIfNotPresent = false.
+   */
   @Test
-  public void testParserGetMissWithoutDefault() {
+  public void testGetParserMissWithoutDefault() {
 
-    {
-      Object out = run("CONFIG_GET('PARSER', 'brop', false)", new HashMap<>(), context);
-      Assert.assertNull(out);
-    }
+    // expect null because emptyIfNotPresent = false
+    Object out = run("CONFIG_GET('PARSER', 'sensor', false)", context);
+    assertNull(out);
   }
 
+  /**
+   * The CONFIG_GET function should return a default configuration when none
+   * currently exists.
+   */
   @Test
-  public void testParserGetMissWithDefault() throws Exception {
-    JSONObject expected = (JSONObject) new JSONParser().parse(defaultBropParserConfig);
+  public void testGetParserMissWithDefault() throws Exception {
 
+    SensorParserConfig expected = new SensorParserConfig();
     {
-      Object out = run("CONFIG_GET('PARSER', 'brop')", new HashMap<>(), context);
-      JSONObject actual = (JSONObject) new JSONParser().parse(out.toString().trim());
-      Assert.assertEquals(expected, actual);
+      Object out = run("CONFIG_GET('PARSER', 'sensor')", context);
+      SensorParserConfig actual = SensorParserConfig.fromBytes(out.toString().getBytes());
+      assertEquals(expected, actual);
     }
     {
-      Object out = run("CONFIG_GET('PARSER', 'brop', true)", new HashMap<>(), context);
-      JSONObject actual = (JSONObject) new JSONParser().parse(out.toString().trim());
-      Assert.assertEquals(expected, actual);
+      Object out = run("CONFIG_GET('PARSER', 'sensor', true)", context);
+      SensorParserConfig actual = SensorParserConfig.fromBytes(out.toString().getBytes());
+      assertEquals(expected, actual);
     }
   }
 
-  static String goodTestEnrichmentConfig = slurp( SAMPLE_CONFIG_PATH + "/enrichments/test.json");
+  /**
+   * The CONFIG_GET function should be able to return the Enrichment configuration
+   * for a given sensor.
+   */
+  @Test
+  public void testGetEnrichment() throws Exception {
+
+    String out = (String) run("CONFIG_GET('ENRICHMENT', 'test')", context);
+
+    SensorEnrichmentConfig actual = SensorEnrichmentConfig.fromBytes(out.getBytes());
+    SensorEnrichmentConfig expected = SensorEnrichmentConfig.fromBytes(goodTestEnrichmentConfig.getBytes());
+    assertEquals(expected, actual);
+  }
+
+  /**
+   * No default configuration should be provided in this case.
+   */
+  @Test
+  public void testGetEnrichmentMissWithoutDefault() {
+
+    // expect null because emptyIfNotPresent = false
+    Object out = run("CONFIG_GET('ENRICHMENT', 'sense', false)", context);
+    assertNull(out);
+  }
 
   /**
+   * A default empty configuration should be provided, if one does not exist.
+   */
+  @Test
+  public void testGetEnrichmentMissWithDefault() throws Exception {
+
+    // expect an empty configuration to be returned
+    SensorEnrichmentConfig expected = new SensorEnrichmentConfig();
     {
-      "enrichment" : {
-        "fieldMap" : { },
-        "fieldToTypeMap" : { },
-        "config" : { }
-      },
-      "threatIntel" : {
-        "fieldMap" : { },
-        "fieldToTypeMap" : { },
-        "config" : { },
-        "triageConfig" : {
-          "riskLevelRules" : [ ],
-          "aggregator" : "MAX",
-          "aggregationConfig" : { }
-        }
-      },
-      "configuration" : { }
+      String out = (String) run("CONFIG_GET('ENRICHMENT', 'missing-sensor')", context);
+      SensorEnrichmentConfig actual = SensorEnrichmentConfig.fromBytes(out.getBytes());
+      assertEquals(expected, actual);
+    }
+    {
+      String out = (String) run("CONFIG_GET('ENRICHMENT', 'missing-sensor', true)", context);
+      SensorEnrichmentConfig actual = SensorEnrichmentConfig.fromBytes(out.getBytes());
+      assertEquals(expected, actual);
     }
+  }
+
+  /**
+   * The CONFIG_GET function should be able to return the Indexing configuration
+   * for a given sensor.
    */
-  @Multiline
-  static String defaultBropEnrichmentConfig;
+  @Test
+  public void testGetIndexing() throws Exception {
 
+    String out = (String) run("CONFIG_GET('INDEXING', 'test')", context);
+
+    Map<String, Object> actual = toJSONObject(out);
+    Map<String, Object> expected = toJSONObject(goodTestIndexingConfig);
+    assertEquals(expected, actual);
+  }
 
+  /**
+   * No default configuration should be provided in this case.
+   */
   @Test
-  public void testEnrichmentGetHappyPath() {
+  public void testGetIndexingMissWithoutDefault() {
 
-    Object out = run("CONFIG_GET('ENRICHMENT', 'test')", new HashMap<>(), context);
-    Assert.assertEquals(goodTestEnrichmentConfig, out.toString().trim());
+    // expect null because emptyIfNotPresent = false
+    Object out = run("CONFIG_GET('INDEXING', 'sense', false)", context);
+    assertNull(out);
   }
 
+  /**
+   * A default empty configuration should be provided, if one does not exist.
+   */
   @Test
-  public void testEnrichmentGetMissWithoutDefault() {
+  public void testGetIndexingMissWithDefault() throws Exception {
 
+    // expect an empty configuration to be returned
+    Map<String, Object> expected = Collections.emptyMap();
+    {
+      String out = (String) run("CONFIG_GET('INDEXING', 'missing-sensor')", context);
+      Map<String, Object> actual = toJSONObject(out);
+      assertEquals(expected, actual);
+    }
     {
-      Object out = run("CONFIG_GET('ENRICHMENT', 'brop', false)", new HashMap<>(), context);
-      Assert.assertNull(out);
+      String out = (String) run("CONFIG_GET('INDEXING', 'missing-sensor', true)", context);
+      Map<String, Object> actual = toJSONObject(out);
+      assertEquals(expected, actual);
     }
   }
 
+  /**
+   * The CONFIG_GET function should be able to return the Profiler configuration.
+   */
+  @Test
+  public void testGetProfiler() throws Exception {
+
+    String out = (String) run("CONFIG_GET('PROFILER')", context);
+
+    ProfilerConfig actual = ProfilerConfig.fromBytes(out.getBytes());
+    ProfilerConfig expected = ProfilerConfig.fromBytes(goodProfilerConfig.getBytes());
+    assertEquals(expected, actual);
+  }
+
+  /**
+   * No default configuration should be provided in this case.
+   */
   @Test
-  public void testEnrichmentGetMissWithDefault() throws Exception {
-    JSONObject expected = (JSONObject) new JSONParser().parse(defaultBropEnrichmentConfig);
+  public void testGetProfilerMissWithoutDefault() throws Exception {
+
+    deletePath(PROFILER.getZookeeperRoot());
 
+    // expect null because emptyIfNotPresent = false
+    String out = (String) run("CONFIG_GET('PROFILER', false)", context);
+    assertNull(out);
+  }
+
+  /**
+   * A default empty configuration should be provided, if one does not exist.
+   */
+  @Test
+  public void testGetProfilerMissWithDefault() throws Exception {
+
+    // there is no profiler config in zookeeper
+    deletePath(PROFILER.getZookeeperRoot());
+
+    // expect an empty configuration to be returned
+    ProfilerConfig expected = new ProfilerConfig();
     {
-      Object out = run("CONFIG_GET('ENRICHMENT', 'brop')", new HashMap<>(), context);
-      JSONObject actual = (JSONObject) new JSONParser().parse(out.toString().trim());
-      Assert.assertEquals(expected, actual);
+      String out = (String) run("CONFIG_GET('PROFILER', true)", context);
+      ProfilerConfig actual = ProfilerConfig.fromJSON(out);
+      assertEquals(expected, actual);
     }
     {
-      Object out = run("CONFIG_GET('ENRICHMENT', 'brop', true)", new HashMap<>(), context);
-      JSONObject actual = (JSONObject) new JSONParser().parse(out.toString().trim());
-      Assert.assertEquals(expected, actual);
+      String out = (String) run("CONFIG_GET('PROFILER')", context);
+      ProfilerConfig actual = ProfilerConfig.fromJSON(out);
+      assertEquals(expected, actual);
     }
   }
 
-  static String goodGlobalConfig = slurp( SAMPLE_CONFIG_PATH+ "/global.json");
+  @Test
+  public void testGetGlobal() throws Exception {
+
+    String out = (String) run("CONFIG_GET('GLOBAL')", context);
+
+    Map<String, Object> actual = toJSONObject(out);
+    Map<String, Object> expected = toJSONObject(goodGlobalConfig);
+    assertEquals(expected, actual);
+  }
+
+  /**
+   * No default configuration should be provided in this case.
+   */
+  @Test
+  public void testGetGlobalMissWithoutDefault() throws Exception {
+
+    // there is no global config in zookeeper
+    deletePath(GLOBAL.getZookeeperRoot());
+
+    // expect null because emptyIfNotPresent = false
+    Object out = run("CONFIG_GET('GLOBAL', false)", context);
+    assertNull(out);
+  }
 
+  /**
+   * A default empty configuration should be provided, if one does not exist.
+   */
   @Test
-  public void testGlobalGet() {
+  public void testGetGlobalMissWithDefault() throws Exception {
+
+    // there is no global config in zookeeper
+    deletePath(GLOBAL.getZookeeperRoot());
 
-    Object out = run("CONFIG_GET('GLOBAL')", new HashMap<>(), context);
-    Assert.assertEquals(goodGlobalConfig, out.toString().trim());
+    // expect an empty configuration to be returned
+    Map<String, Object> expected = Collections.emptyMap();
+    {
+      String out = (String) run("CONFIG_GET('GLOBAL')", context);
+      Map<String, Object> actual = toJSONObject(out);
+      assertEquals(expected, actual);
+    }
+    {
+      String out = (String) run("CONFIG_GET('GLOBAL', true)", context);
+      Map<String, Object> actual = toJSONObject(out);
+      assertEquals(expected, actual);
+    }
   }
 
   @Test
-  public void testGlobalPut() {
+  public void testPutGlobal() throws Exception {
+
+    String out = (String) run("CONFIG_GET('GLOBAL')", context);
 
-    Object out = run("CONFIG_GET('GLOBAL')", new HashMap<>(), context);
-    Assert.assertEquals(goodGlobalConfig, out.toString().trim());
+    Map<String, Object> actual = toJSONObject(out);
+    Map<String, Object> expected = toJSONObject(goodGlobalConfig);
+    assertEquals(expected, actual);
   }
 
   @Test(expected=ParseException.class)
-  public void testGlobalPutBad() {
+  public void testPutGlobalBad() {
     {
       UnitTestHelper.setLog4jLevel(ConfigurationFunctions.class, Level.FATAL);
       try {
-        run("CONFIG_PUT('GLOBAL', 'foo bar')", new HashMap<>(), context);
+        run("CONFIG_PUT('GLOBAL', 'foo bar')", context);
       } catch(ParseException e) {
         UnitTestHelper.setLog4jLevel(ConfigurationFunctions.class, Level.ERROR);
         throw e;
@@ -215,23 +414,23 @@ public class ConfigurationFunctionsTest {
   }
 
   @Test
-  public void testIndexingPut() throws InterruptedException {
-    String brop= (String) run("CONFIG_GET('INDEXING', 'testIndexingPut')", new HashMap<>(), context);
+  public void testPutIndexing() throws InterruptedException {
+    String brop = (String) run("CONFIG_GET('INDEXING', 'testIndexingPut')", context);
     run("CONFIG_PUT('INDEXING', config, 'testIndexingPut')", ImmutableMap.of("config", brop), context);
     boolean foundMatch = false;
     for(int i = 0;i < 10 && !foundMatch;++i) {
-      String bropNew = (String) run("CONFIG_GET('INDEXING', 'testIndexingPut', false)", new HashMap<>(), context);
+      String bropNew = (String) run("CONFIG_GET('INDEXING', 'testIndexingPut', false)", context);
       foundMatch =  brop.equals(bropNew);
       if(foundMatch) {
         break;
       }
       Thread.sleep(2000);
     }
-    Assert.assertTrue(foundMatch);
+    assertTrue(foundMatch);
   }
 
   @Test(expected= ParseException.class)
-  public void testIndexingPutBad() throws InterruptedException {
+  public void testPutIndexingBad() throws InterruptedException {
     {
       {
         UnitTestHelper.setLog4jLevel(ConfigurationFunctions.class, Level.FATAL);
@@ -246,23 +445,26 @@ public class ConfigurationFunctionsTest {
   }
 
   @Test
-  public void testEnrichmentPut() throws InterruptedException {
-    String brop= (String) run("CONFIG_GET('ENRICHMENT', 'testEnrichmentPut')", new HashMap<>(), context);
-    run("CONFIG_PUT('ENRICHMENT', config, 'testEnrichmentPut')", ImmutableMap.of("config", brop), context);
+  public void testPutEnrichment() throws InterruptedException {
+    String config = (String) run("CONFIG_GET('ENRICHMENT', 'sensor')", context);
+    assertNotNull(config);
+
+    run("CONFIG_PUT('ENRICHMENT', config, 'sensor')", ImmutableMap.of("config", config), context);
+
     boolean foundMatch = false;
     for(int i = 0;i < 10 && !foundMatch;++i) {
-      String bropNew = (String) run("CONFIG_GET('ENRICHMENT', 'testEnrichmentPut', false)", new HashMap<>(), context);
-      foundMatch =  brop.equals(bropNew);
+      String newConfig = (String) run("CONFIG_GET('ENRICHMENT', 'sensor', false)", context);
+      foundMatch = config.equals(newConfig);
       if(foundMatch) {
         break;
       }
       Thread.sleep(2000);
     }
-    Assert.assertTrue(foundMatch);
+    assertTrue(foundMatch);
   }
 
   @Test(expected= ParseException.class)
-  public void testEnrichmentPutBad() throws InterruptedException {
+  public void testPutEnrichmentBad() throws InterruptedException {
     {
       {
         UnitTestHelper.setLog4jLevel(ConfigurationFunctions.class, Level.FATAL);
@@ -277,23 +479,23 @@ public class ConfigurationFunctionsTest {
   }
 
   @Test
-  public void testParserPut() throws InterruptedException {
-    String brop= (String) run("CONFIG_GET('PARSER', 'testParserPut')", new HashMap<>(), context);
+  public void testPutParser() throws InterruptedException {
+    String brop = (String) run("CONFIG_GET('PARSER', 'testParserPut')", context);
     run("CONFIG_PUT('PARSER', config, 'testParserPut')", ImmutableMap.of("config", brop), context);
     boolean foundMatch = false;
     for(int i = 0;i < 10 && !foundMatch;++i) {
-      String bropNew = (String) run("CONFIG_GET('PARSER', 'testParserPut', false)", new HashMap<>(), context);
+      String bropNew = (String) run("CONFIG_GET('PARSER', 'testParserPut', false)", context);
       foundMatch =  brop.equals(bropNew);
       if(foundMatch) {
         break;
       }
       Thread.sleep(2000);
     }
-    Assert.assertTrue(foundMatch);
+    assertTrue(foundMatch);
   }
 
   @Test(expected= ParseException.class)
-  public void testParserPutBad() throws InterruptedException {
+  public void testPutParserBad() throws InterruptedException {
     {
       UnitTestHelper.setLog4jLevel(ConfigurationFunctions.class, Level.FATAL);
       try {

http://git-wip-us.apache.org/repos/asf/metron/blob/37e3fd32/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/shell/DefaultStellarShellExecutor.java
----------------------------------------------------------------------
diff --git a/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/shell/DefaultStellarShellExecutor.java b/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/shell/DefaultStellarShellExecutor.java
index 781a0cf..352ae2b 100644
--- a/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/shell/DefaultStellarShellExecutor.java
+++ b/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/shell/DefaultStellarShellExecutor.java
@@ -52,7 +52,6 @@ import java.io.ByteArrayInputStream;
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -342,15 +341,18 @@ public class DefaultStellarShellExecutor implements StellarShellExecutor {
    * @param zkClient An optional Zookeeper client.
    */
   private Context createContext(Properties properties, Optional<CuratorFramework> zkClient) throws Exception {
+
     Context.Builder contextBuilder = new Context.Builder();
     Map<String, Object> globals;
     if (zkClient.isPresent()) {
+      LOG.debug("Zookeeper client present; fetching globals from Zookeeper.");
 
       // fetch globals from zookeeper
       globals = fetchGlobalConfig(zkClient.get());
       contextBuilder.with(ZOOKEEPER_CLIENT, () -> zkClient.get());
 
     } else {
+      LOG.debug("No Zookeeper client; initializing empty globals.");
 
       // use empty globals to allow a user to '%define' their own
       globals = new HashMap<>();
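
For context, %define is the Stellar shell magic that lets a user set a global variable from within the REPL session, which is why empty globals are initialized here rather than failing outright.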

http://git-wip-us.apache.org/repos/asf/metron/blob/37e3fd32/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/utils/StellarProcessorUtils.java
----------------------------------------------------------------------
diff --git a/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/utils/StellarProcessorUtils.java b/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/utils/StellarProcessorUtils.java
index 5912657..d5f267e 100644
--- a/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/utils/StellarProcessorUtils.java
+++ b/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/utils/StellarProcessorUtils.java
@@ -18,17 +18,18 @@
 
 package org.apache.metron.stellar.common.utils;
 
+import com.google.common.collect.ImmutableList;
+import org.apache.metron.stellar.common.StellarPredicateProcessor;
+import org.apache.metron.stellar.common.StellarProcessor;
 import org.apache.metron.stellar.dsl.Context;
 import org.apache.metron.stellar.dsl.DefaultVariableResolver;
 import org.apache.metron.stellar.dsl.MapVariableResolver;
 import org.apache.metron.stellar.dsl.StellarFunctions;
 import org.apache.metron.stellar.dsl.VariableResolver;
-import com.google.common.collect.ImmutableList;
-import org.apache.metron.stellar.common.StellarPredicateProcessor;
-import org.apache.metron.stellar.common.StellarProcessor;
 import org.junit.Assert;
 
 import java.util.AbstractMap;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Spliterators;
@@ -39,39 +40,76 @@ import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import java.util.stream.StreamSupport;
 
+/**
+ * Utilities for executing and validating Stellar expressions.
+ */
 public class StellarProcessorUtils {
 
-    /**
-     * This utility class is intended for use while unit testing Stellar operators.
-     * It is included in the "main" code so third-party operators will not need
-     * a test dependency on Stellar's test-jar.
-     *
-     * This class ensures the basic contract of a stellar expression is adhered to:
-     * 1. Validate works on the expression
-     * 2. The output can be serialized and deserialized properly
-     *
-     * @param rule
-     * @param variables
-     * @param context
-     * @return ret
-     */
-    public static Object run(String rule, Map<String, Object> variables, Context context) {
-        StellarProcessor processor = new StellarProcessor();
-        Assert.assertTrue(rule + " not valid.", processor.validate(rule, context));
-        Object ret = processor.parse(rule, new DefaultVariableResolver(x -> variables.get(x),x-> variables.containsKey(x)), StellarFunctions.FUNCTION_RESOLVER(), context);
-        byte[] raw = SerDeUtils.toBytes(ret);
-        Object actual = SerDeUtils.fromBytes(raw, Object.class);
-        Assert.assertEquals(ret, actual);
-        return ret;
-    }
+  /**
+   * Execute and validate a Stellar expression.
+   *
+   * <p>This is intended for use while unit testing Stellar expressions.  This ensures that the expression
+   * validates successfully and produces a result that can be serialized correctly.
+   *
+   * @param expression The expression to execute.
+   * @param variables The variables to expose to the expression.
+   * @param context The execution context.
+   * @return The result of executing the expression.
+   */
+  public static Object run(String expression, Map<String, Object> variables, Context context) {
+
+    // validate the expression
+    StellarProcessor processor = new StellarProcessor();
+    Assert.assertTrue("Invalid expression; expr=" + expression,
+            processor.validate(expression, context));
+
+    // execute the expression
+    Object ret = processor.parse(
+            expression,
+            new DefaultVariableResolver(x -> variables.get(x), x -> variables.containsKey(x)),
+            StellarFunctions.FUNCTION_RESOLVER(),
+            context);
+
+    // ensure the result can be serialized/deserialized
+    byte[] raw = SerDeUtils.toBytes(ret);
+    Object actual = SerDeUtils.fromBytes(raw, Object.class);
+    Assert.assertEquals(ret, actual);
+
+    return ret;
+  }
+
+  /**
+   * Execute and validate a Stellar expression.
+   *
+   * <p>This is intended for use while unit testing Stellar expressions.  This ensures that the expression
+   * validates successfully and produces a result that can be serialized correctly.
+   *
+   * @param expression The expression to execute.
+   * @param variables The variables to expose to the expression.
+   * @return The result of executing the expression.
+   */
+  public static Object run(String expression, Map<String, Object> variables) {
+    return run(expression, variables, Context.EMPTY_CONTEXT());
+  }
 
-  public static Object run(String rule, Map<String, Object> variables) {
-    return run(rule, variables, Context.EMPTY_CONTEXT());
+  /**
+   * Execute and validate a Stellar expression.
+   *
+   * <p>This is intended for use while unit testing Stellar expressions.  This ensures that the expression
+   * validates successfully and produces a result that can be serialized correctly.
+   *
+   * @param expression The expression to execute.
+   * @param context The execution context.
+   * @return The result of executing the expression.
+   */
+  public static Object run(String expression, Context context) {
+    return run(expression, Collections.emptyMap(), context);
   }
 
-  public static void validate(String rule, Context context) {
+  public static void validate(String expression, Context context) {
     StellarProcessor processor = new StellarProcessor();
-    Assert.assertTrue(rule + " not valid.", processor.validate(rule, context));
+    Assert.assertTrue("Invalid expression; expr=" + expression,
+            processor.validate(expression, context));
   }
 
   public static void validate(String rule) {
@@ -101,19 +139,18 @@ public class StellarProcessorUtils {
   }
 
   public static void runWithArguments(String function, List<Object> arguments, Object expected) {
-    Supplier<Stream<Map.Entry<String, Object>>> kvStream = () -> StreamSupport.stream(new XRange(arguments.size()), false)
-            .map( i -> new AbstractMap.SimpleImmutableEntry<>("var" + i, arguments.get(i)));
+    Supplier<Stream<Map.Entry<String, Object>>> kvStream = () -> StreamSupport
+            .stream(new XRange(arguments.size()), false)
+            .map(i -> new AbstractMap.SimpleImmutableEntry<>("var" + i, arguments.get(i)));
 
-    String args = kvStream.get().map( kv -> kv.getKey())
-                                .collect(Collectors.joining(","));
+    String args = kvStream.get().map(kv -> kv.getKey()).collect(Collectors.joining(","));
     Map<String, Object> variables = kvStream.get().collect(Collectors.toMap(kv -> kv.getKey(), kv -> kv.getValue()));
-    String stellarStatement =  function + "(" + args + ")";
+    String stellarStatement = function + "(" + args + ")";
     String reason = stellarStatement + " != " + expected + " with variables: " + variables;
 
-    if(expected instanceof Double) {
-      Assert.assertEquals(reason, (Double)expected, (Double)run(stellarStatement, variables), 1e-6);
-    }
-    else {
+    if (expected instanceof Double) {
+      Assert.assertEquals(reason, (Double) expected, (Double) run(stellarStatement, variables), 1e-6);
+    } else {
       Assert.assertEquals(reason, expected, run(stellarStatement, variables));
     }
   }
@@ -135,10 +172,9 @@ public class StellarProcessorUtils {
     @Override
     public boolean tryAdvance(IntConsumer action) {
       boolean isDone = i >= end;
-      if(isDone) {
+      if (isDone) {
         return false;
-      }
-      else {
+      } else {
         action.accept(i);
         i++;
         return true;
@@ -148,25 +184,20 @@ public class StellarProcessorUtils {
     /**
      * {@inheritDoc}
      *
-     * @param action
-     * to {@code IntConsumer} and passed to
-     * {@link #tryAdvance(IntConsumer)}; otherwise
-     * the action is adapted to an instance of {@code IntConsumer}, by
-     * boxing the argument of {@code IntConsumer}, and then passed to
-     * {@link #tryAdvance(IntConsumer)}.
+     * @param action the action; if it is an instance of {@code IntConsumer} it is cast
+     *     to {@code IntConsumer} and passed to {@link #tryAdvance(IntConsumer)}; otherwise
+     *     the action is adapted to an instance of {@code IntConsumer}, by boxing the
+     *     argument of {@code IntConsumer}, and then passed to {@link #tryAdvance(IntConsumer)}.
      */
     @Override
     public boolean tryAdvance(Consumer<? super Integer> action) {
       boolean isDone = i >= end;
-      if(isDone) {
+      if (isDone) {
         return false;
-      }
-      else {
+      } else {
         action.accept(i);
         i++;
         return true;
       }
     }
   }
-
 }
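
As a usage note, a test built on these helpers looks roughly like the sketch below; run validates the expression, executes it, and asserts that the result survives a serialization round trip (the expression and variables are illustrative):

```
import com.google.common.collect.ImmutableMap;
import static org.apache.metron.stellar.common.utils.StellarProcessorUtils.run;
import static org.junit.Assert.assertEquals;

// validate, execute, and round-trip serialize in one call
Object result = run("TO_UPPER(name)", ImmutableMap.of("name", "metron"));
assertEquals("METRON", result);
```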


[28/50] [abbrv] metron git commit: METRON-1465: Support for Elasticsearch X-pack (wardbekker via mmiklavc) closes apache/metron#946

Posted by rm...@apache.org.
METRON-1465: Support for Elasticsearch X-pack (wardbekker via mmiklavc) closes apache/metron#946


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/a8b555dc
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/a8b555dc
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/a8b555dc

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: a8b555dcc9f548d7b91789a46d9435b4d8b17581
Parents: 3ba9ae2
Author: wardbekker <wa...@wardbekker.com>
Authored: Mon Apr 9 13:14:13 2018 -0600
Committer: Michael Miklavcic <mi...@gmail.com>
Committed: Mon Apr 9 13:14:13 2018 -0600

----------------------------------------------------------------------
 metron-deployment/Kerberos-manual-setup.md      | 209 +++++++++++++++++++
 .../roles/metron-builder/tasks/build-debs.yml   |   2 +-
 .../roles/metron-builder/tasks/build-rpms.yml   |   2 +-
 .../METRON/CURRENT/configuration/metron-env.xml |   2 -
 .../metron-rest/src/main/scripts/metron-rest.sh |   9 +
 .../src/main/config/zookeeper/global.json       |   5 +-
 .../apache/metron/common/utils/HDFSUtils.java   |  59 ++++++
 .../metron/common/utils/ReflectionUtils.java    |  66 +++++-
 .../elasticsearch/dao/ElasticsearchDao.java     |  33 ++-
 .../elasticsearch/utils/ElasticsearchUtils.java | 107 ++++++++--
 .../writer/ElasticsearchWriter.java             |   8 +-
 .../scripts/start_elasticsearch_topology.sh     |   8 +-
 .../writer/ElasticsearchWriterTest.java         |  19 +-
 .../stellar/common/utils/ConversionUtils.java   |  19 +-
 14 files changed, 486 insertions(+), 62 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/a8b555dc/metron-deployment/Kerberos-manual-setup.md
----------------------------------------------------------------------
diff --git a/metron-deployment/Kerberos-manual-setup.md b/metron-deployment/Kerberos-manual-setup.md
index 47a63d8..456703a 100644
--- a/metron-deployment/Kerberos-manual-setup.md
+++ b/metron-deployment/Kerberos-manual-setup.md
@@ -30,6 +30,7 @@ This document provides instructions for kerberizing Metron's Vagrant-based devel
 * [Start Metron](#start-metron)
 * [Push Data](#push-data)
 * [More Information](#more-information)
+* [Elasticsearch X-Pack](#x-pack)
 
 Setup
 -----
@@ -533,3 +534,211 @@ In order to correct this, you should:
 ### References
 
 * [https://github.com/apache/storm/blob/master/SECURITY.md](https://github.com/apache/storm/blob/master/SECURITY.md)
+
+X-Pack
+------
+
+First, stop the random_access_indexing topology through the Storm UI or from the CLI, e.g.
+
+```
+storm kill random_access_indexing
+```
+
+Here are instructions for enabling X-Pack with Elasticsearch and Kibana: https://www.elastic.co/guide/en/x-pack/5.6/installing-xpack.html
+
+Be sure to add the appropriate username and password for Elasticsearch and Kibana so that Metron components can connect externally. For example, the following creates a user "transport_client_user" with password "changeme" and the "superuser" role.
+
+```
+sudo /usr/share/elasticsearch/bin/x-pack/users useradd transport_client_user -p changeme -r superuser
+```
+
+Once you've picked a password for connecting to ES, upload a one-line file containing that password to HDFS. Metron reads the password from this file in order to connect to ES securely.
+
+Here is an example using "changeme" as the password:
+
+```
+echo changeme > /tmp/xpack-password
+sudo -u hdfs hdfs dfs -mkdir /apps/metron/elasticsearch/
+sudo -u hdfs hdfs dfs -put /tmp/xpack-password /apps/metron/elasticsearch/
+sudo -u hdfs hdfs dfs -chown metron:metron /apps/metron/elasticsearch/xpack-password
+```
+
+New settings have been added to configure the Elasticsearch client. By default, the client runs as the normal ES prebuilt transport client. If you enable X-Pack, set es.client.class as shown below.
+
+Add the es settings to global.json
+
+```
+/usr/metron/0.4.3/config/zookeeper/global.json ->
+
+  "es.client.settings" : {
+      "es.client.class" : "org.elasticsearch.xpack.client.PreBuiltXPackTransportClient",
+      "es.xpack.username" : "transport_client_user",
+      "es.xpack.password.file" : "/apps/metron/elasticsearch/xpack-password"
+  }
+```
+
+Submit the update to Zookeeper
+
+```
+$METRON_HOME/bin/zk_load_configs.sh -m PUSH -i $METRON_HOME/config/zookeeper/ -z $ZOOKEEPER
+```
+
+The last step before restarting the topology is to build a custom X-Pack shaded and relocated jar. Licensing restrictions mean you must build this jar yourself, but here is a sample Maven pom file that should help.
+
+```
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements. See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License. You may obtain a copy of the License at
+  http://www.apache.org/licenses/LICENSE-2.0
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>org.elasticsearch</groupId>
+    <artifactId>elasticsearch-xpack-shaded</artifactId>
+    <name>elasticsearch-xpack-shaded</name>
+    <packaging>jar</packaging>
+    <version>5.6.2</version>
+    <repositories>
+        <repository>
+            <id>elasticsearch-releases</id>
+            <url>https://artifacts.elastic.co/maven</url>
+            <releases>
+                <enabled>true</enabled>
+            </releases>
+            <snapshots>
+                <enabled>false</enabled>
+            </snapshots>
+        </repository>
+    </repositories>
+    <dependencies>
+        <dependency>
+            <groupId>org.elasticsearch.client</groupId>
+            <artifactId>x-pack-transport</artifactId>
+            <version>5.6.2</version>
+            <exclusions>
+              <exclusion>
+                <groupId>com.fasterxml.jackson.dataformat</groupId>
+                <artifactId>jackson-dataformat-smile</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>com.fasterxml.jackson.dataformat</groupId>
+                <artifactId>jackson-dataformat-yaml</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>com.fasterxml.jackson.dataformat</groupId>
+                <artifactId>jackson-dataformat-cbor</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>com.fasterxml.jackson.core</groupId>
+                <artifactId>jackson-core</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>org.slf4j</groupId>
+                <artifactId>slf4j-api</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>org.slf4j</groupId>
+                <artifactId>slf4j-log4j12</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>log4j</groupId>
+                <artifactId>log4j</artifactId>
+              </exclusion>
+              <exclusion> <!-- this is causing a weird build error if not excluded - Error creating shaded jar: null: IllegalArgumentException -->
+                    <groupId>org.apache.logging.log4j</groupId>
+                    <artifactId>log4j-api</artifactId>
+                </exclusion>
+            </exclusions>
+          </dependency>
+    </dependencies>
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <version>2.4.3</version>
+                <configuration>
+                    <createDependencyReducedPom>true</createDependencyReducedPom>
+                </configuration>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                        <configuration>
+                          <filters>
+                            <filter>
+                              <artifact>*:*</artifact>
+                              <excludes>
+                                <exclude>META-INF/*.SF</exclude>
+                                <exclude>META-INF/*.DSA</exclude>
+                                <exclude>META-INF/*.RSA</exclude>
+                              </excludes>
+                            </filter>
+                          </filters>
+                          <relocations>
+                                <relocation>
+                                    <pattern>io.netty</pattern>
+                                    <shadedPattern>org.apache.metron.io.netty</shadedPattern>
+                                </relocation>
+                                <relocation>
+                                    <pattern>org.apache.logging.log4j</pattern>
+                                    <shadedPattern>org.apache.metron.logging.log4j</shadedPattern>
+                                </relocation>
+                            </relocations>
+                            <artifactSet>
+                                <excludes>
+                                    <exclude>org.slf4j.impl*</exclude>
+                                    <exclude>org.slf4j:slf4j-log4j*</exclude>
+                                </excludes>
+                            </artifactSet>
+                            <transformers>
+                                <transformer
+                                  implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
+                                     <resources>
+                                        <resource>.yaml</resource>
+                                        <resource>LICENSE.txt</resource>
+                                        <resource>ASL2.0</resource>
+                                        <resource>NOTICE.txt</resource>
+                                      </resources>
+                                </transformer>
+                                <transformer
+                                        implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
+                                <transformer
+                                        implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
+                                    <mainClass></mainClass>
+                                </transformer>
+                            </transformers>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
+```
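+
+Assuming the pom above is saved as pom.xml, building is the standard `mvn clean package`; the shaded jar should appear under target/.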
+
+Once you've built the elasticsearch-xpack-shaded-5.6.2.jar, it needs to be made available to Storm when you submit the topology. Create a contrib directory for indexing and put the jar file in this directory.
+
+```
+/usr/metron/0.4.3/indexing_contrib/elasticsearch-xpack-shaded-5.6.2.jar
+```
+
+Now you can restart the Elasticsearch topology. Note that you should perform this step manually, as follows.
+
+```
+$METRON_HOME/bin/start_elasticsearch_topology.sh
+```
+
+Once you've performed these steps, you should be able to start seeing data in your ES indexes.

http://git-wip-us.apache.org/repos/asf/metron/blob/a8b555dc/metron-deployment/ansible/roles/metron-builder/tasks/build-debs.yml
----------------------------------------------------------------------
diff --git a/metron-deployment/ansible/roles/metron-builder/tasks/build-debs.yml b/metron-deployment/ansible/roles/metron-builder/tasks/build-debs.yml
index 4949196..01ab565 100644
--- a/metron-deployment/ansible/roles/metron-builder/tasks/build-debs.yml
+++ b/metron-deployment/ansible/roles/metron-builder/tasks/build-debs.yml
@@ -20,7 +20,7 @@
   args:
     chdir: "{{ metron_build_dir }}/metron-deployment"
   with_items:
-    - mvn package -DskipTests -Pbuild-debs
+    - mvn package -DskipTests -Pbuild-debs -T 2C
   become: false
   run_once: true
   delegate_to: localhost

http://git-wip-us.apache.org/repos/asf/metron/blob/a8b555dc/metron-deployment/ansible/roles/metron-builder/tasks/build-rpms.yml
----------------------------------------------------------------------
diff --git a/metron-deployment/ansible/roles/metron-builder/tasks/build-rpms.yml b/metron-deployment/ansible/roles/metron-builder/tasks/build-rpms.yml
index c362fc2..7a5b6bd 100644
--- a/metron-deployment/ansible/roles/metron-builder/tasks/build-rpms.yml
+++ b/metron-deployment/ansible/roles/metron-builder/tasks/build-rpms.yml
@@ -20,7 +20,7 @@
   args:
     chdir: "{{ metron_build_dir }}/metron-deployment"
   with_items:
-    - mvn package -DskipTests -Pbuild-rpms
+    - mvn package -DskipTests -Pbuild-rpms -T 2C
   become: false
   run_once: true
   delegate_to: localhost
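
For reference, the -T 2C flag added to both build tasks asks Maven for two build threads per CPU core, parallelizing the multi-module package step.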

http://git-wip-us.apache.org/repos/asf/metron/blob/a8b555dc/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-env.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-env.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-env.xml
index 87866e8..5c49799 100644
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-env.xml
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-env.xml
@@ -137,6 +137,4 @@
         <value>yyyy.MM.dd.HH</value>
         <display-name>Elasticsearch Date Format</display-name>
     </property>
-
-
 </configuration>

http://git-wip-us.apache.org/repos/asf/metron/blob/a8b555dc/metron-interface/metron-rest/src/main/scripts/metron-rest.sh
----------------------------------------------------------------------
diff --git a/metron-interface/metron-rest/src/main/scripts/metron-rest.sh b/metron-interface/metron-rest/src/main/scripts/metron-rest.sh
index f9a2b69..c293566 100644
--- a/metron-interface/metron-rest/src/main/scripts/metron-rest.sh
+++ b/metron-interface/metron-rest/src/main/scripts/metron-rest.sh
@@ -36,6 +36,7 @@ METRON_SYSCONFIG="${METRON_SYSCONFIG:-/etc/default/metron}"
 METRON_LOG_DIR="${METRON_LOG_DIR:-/var/log/metron}"
 METRON_PID_FILE="${METRON_PID_FILE:-/var/run/metron/metron-rest.pid}"
 PARSER_CONTRIB=${PARSER_CONTRIB:-$METRON_HOME/parser_contrib}
+INDEXING_CONTRIB=${INDEXING_CONTRIB:-$METRON_HOME/indexing_contrib}
 PARSER_LIB=$(find $METRON_HOME/lib/ -name metron-parsers*.jar)
 
 echo "METRON_VERSION=${METRON_VERSION}"
@@ -65,6 +66,14 @@ if [ -d "$PARSER_CONTRIB" ]; then
   METRON_REST_CLASSPATH+=":${contrib_classpath}"
 fi
 
+if [ -d "$INDEXING_CONTRIB" ]; then
+  contrib_jar_pattern="${INDEXING_CONTRIB}/*.jar"
+  contrib_list=( $contrib_jar_pattern ) # expand the glob to a list
+  contrib_classpath=$(join_by : "${contrib_list[@]}") # join the list with a colon
+  echo "Indexing Contrib jars are: $contrib_classpath"
+  METRON_REST_CLASSPATH+=":${contrib_classpath}"
+fi
+
 echo "METRON_SPRING_PROFILES_ACTIVE=${METRON_SPRING_PROFILES_ACTIVE}"
 
 # the vagrant Spring profile provides configuration values, otherwise configuration is provided by rest_application.yml
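
Note that join_by is assumed to be an existing helper in metron-rest.sh (the usual bash idiom: set IFS to the delimiter and echo "$*"), already used above for PARSER_CONTRIB.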

http://git-wip-us.apache.org/repos/asf/metron/blob/a8b555dc/metron-platform/metron-common/src/main/config/zookeeper/global.json
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/main/config/zookeeper/global.json b/metron-platform/metron-common/src/main/config/zookeeper/global.json
index dc7e71f..9e5402e 100644
--- a/metron-platform/metron-common/src/main/config/zookeeper/global.json
+++ b/metron-platform/metron-common/src/main/config/zookeeper/global.json
@@ -4,5 +4,8 @@
   "es.date.format": "yyyy.MM.dd.HH",
   "parser.error.topic": "indexing",
   "update.hbase.table": "metron_update",
-  "update.hbase.cf": "t"
+  "update.hbase.cf": "t",
+  "es.client.settings": {
+    "client.transport.ping_timeout": "500s"
+  }
 }
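
For illustration, settings that used to be passed through the writer's optionalSettings can now live under this es.client.settings map, alongside the custom keys the Elasticsearch utilities later in this change understand (es.client.class, es.xpack.username, es.xpack.password.file). A hypothetical global.json enabling X-Pack security might look like the sketch below; the cluster name, username, and HDFS password-file path are placeholders, and the client class name is assumed from the ES 5.x x-pack-transport artifact:

    {
      "es.clustername": "metron",
      "es.ip": "node1:9300",
      "es.date.format": "yyyy.MM.dd.HH",
      "es.client.settings": {
        "client.transport.ping_timeout": "500s",
        "es.client.class": "org.elasticsearch.xpack.client.PreBuiltXPackTransportClient",
        "es.xpack.username": "xpack_user",
        "es.xpack.password.file": "/apps/metron/elasticsearch/xpack-password"
      }
    }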

http://git-wip-us.apache.org/repos/asf/metron/blob/a8b555dc/metron-platform/metron-common/src/main/java/org/apache/metron/common/utils/HDFSUtils.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/utils/HDFSUtils.java b/metron-platform/metron-common/src/main/java/org/apache/metron/common/utils/HDFSUtils.java
new file mode 100644
index 0000000..ee00b7e
--- /dev/null
+++ b/metron-platform/metron-common/src/main/java/org/apache/metron/common/utils/HDFSUtils.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.common.utils;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+public class HDFSUtils {
+
+  /**
+   * Reads full HDFS file contents into a List of Strings. Initializes the file system with the
+   * default configuration. Opens a new file system instance on each call. Never null.
+   *
+   * @param path path to file
+   * @return file contents as a List of Strings, one entry per line
+   * @throws IOException
+   */
+  public static List<String> readFile(String path) throws IOException {
+    return readFile(new Configuration(), path);
+  }
+
+  /**
+   * Reads full HDFS file contents into a List of Strings. Opens a new file system instance on
+   * each call. Never null.
+   *
+   * @param config Hadoop configuration
+   * @param path path to file
+   * @return file contents as a List of Strings, one entry per line
+   * @throws IOException
+   */
+  public static List<String> readFile(Configuration config, String path) throws IOException {
+    FileSystem fs = FileSystem.newInstance(config);
+    Path hdfsPath = new Path(path);
+    FSDataInputStream inputStream = fs.open(hdfsPath);
+    return IOUtils.readLines(inputStream, "UTF-8");
+  }
+
+}
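
As a usage sketch (not part of this commit), the new utility reads an HDFS file line by line; the path below is hypothetical:

    import java.io.IOException;
    import java.util.List;
    import org.apache.metron.common.utils.HDFSUtils;

    public class HDFSUtilsExample {
      public static void main(String[] args) throws IOException {
        // read every line of a (hypothetical) password file stored in HDFS
        List<String> lines = HDFSUtils.readFile("/apps/metron/elasticsearch/xpack-password");
        // by the convention used later in this change, only the first line matters
        System.out.println("first line: " + lines.get(0));
      }
    }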

http://git-wip-us.apache.org/repos/asf/metron/blob/a8b555dc/metron-platform/metron-common/src/main/java/org/apache/metron/common/utils/ReflectionUtils.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/utils/ReflectionUtils.java b/metron-platform/metron-common/src/main/java/org/apache/metron/common/utils/ReflectionUtils.java
index 144cdd9..ee6b041 100644
--- a/metron-platform/metron-common/src/main/java/org/apache/metron/common/utils/ReflectionUtils.java
+++ b/metron-platform/metron-common/src/main/java/org/apache/metron/common/utils/ReflectionUtils.java
@@ -23,19 +23,62 @@ public class ReflectionUtils {
 
   public static <T> T createInstance(String className, T defaultClass) {
     T instance;
-    if(className == null || className.length() == 0 || className.charAt(0) == '$') {
+    if (className == null || className.length() == 0 || className.charAt(0) == '$') {
       return defaultClass;
-    }
-    else {
+    } else {
       instance = createInstance(className);
     }
     return instance;
   }
 
+  /**
+   * Attempts to create an instance from the specified class name. A no-arg constructor is assumed.
+   *
+   * @param className fully qualified name of the class to instantiate, e.g. foo.bar.Baz
+   * @param <T> the type of the instance to create
+   * @return Object of type T
+   */
+  public static <T> T createInstance(String className) {
+    T instance;
+    try {
+      Class<? extends T> clazz = (Class<? extends T>) Class.forName(className);
+      instance = createInstance(clazz);
+    } catch (ClassNotFoundException e) {
+      throw new IllegalStateException("Unable to instantiate connector: class not found", e);
+    }
+    return instance;
+  }
+
+
+  /**
+   * Creates an instance using the no-arg constructor.
+   *
+   * @param clazz Class to create an instance from
+   * @param <T> the type of the instance to create
+   * @return Object of type T
+   */
   public static <T> T createInstance(Class<? extends T> clazz) {
+    return createInstance(clazz, null, null);
+  }
+
+  /**
+   * Creates an instance of the passed class with the specified parameter types and arguments.
+   * If parameterTypes is null, falls back to the no-arg constructor.
+   *
+   * @param clazz Class to create an instance from
+   * @param parameterTypes parameter types used to look up the desired constructor
+   * @param parameters arguments to pass to the constructor when instantiating the object
+   * @param <T> the type of the instance to create
+   * @return Object of type T
+   */
+  public static <T> T createInstance(Class<? extends T> clazz, Class<?>[] parameterTypes, Object[] parameters) {
     T instance;
     try {
-      instance = clazz.getConstructor().newInstance();
+      if (parameterTypes != null) {
+        instance = clazz.getConstructor(parameterTypes).newInstance(parameters);
+      } else {
+        instance = clazz.getConstructor().newInstance();
+      }
     } catch (InstantiationException e) {
       throw new IllegalStateException("Unable to instantiate connector.", e);
     } catch (IllegalAccessException e) {
@@ -47,11 +90,22 @@ public class ReflectionUtils {
     }
     return instance;
   }
-  public static <T> T createInstance(String className) {
+
+  /**
+   * Creates an instance of the passed class name with the specified parameter types and
+   * arguments. If parameterTypes is null, falls back to the no-arg constructor.
+   *
+   * @param className fully qualified name of the class to instantiate
+   * @param parameterTypes parameter types used to look up the desired constructor
+   * @param parameters arguments to pass to the constructor when instantiating the object
+   * @param <T> the type of the instance to create
+   * @return Object of type T
+   */
+  public static <T> T createInstance(String className, Class<?>[] parameterTypes, Object[] parameters) {
     T instance;
     try {
       Class<? extends T> clazz = (Class<? extends T>) Class.forName(className);
-      instance = createInstance(clazz);
+      instance = createInstance(clazz, parameterTypes, parameters);
     } catch (ClassNotFoundException e) {
       throw new IllegalStateException("Unable to instantiate connector: class not found", e);
     }
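
A minimal sketch of the new parameterized overloads, assuming a hypothetical Widget class with both constructors:

    import org.apache.metron.common.utils.ReflectionUtils;

    public class ReflectionExample {

      // hypothetical class exposing a no-arg and a one-arg constructor
      public static class Widget {
        private final String name;
        public Widget() { this("default"); }
        public Widget(String name) { this.name = name; }
        public String getName() { return name; }
      }

      public static void main(String[] args) {
        // no-arg constructor, looked up by class name
        Widget w1 = ReflectionUtils.createInstance(Widget.class.getName());

        // one-arg constructor, selected via explicit parameter types
        Widget w2 = ReflectionUtils.createInstance(
            Widget.class.getName(),
            new Class<?>[]{ String.class },
            new Object[]{ "custom" });

        System.out.println(w1.getName() + ", " + w2.getName()); // default, custom
      }
    }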

http://git-wip-us.apache.org/repos/asf/metron/blob/a8b555dc/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchDao.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchDao.java b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchDao.java
index 26e5731..cb5bb58 100644
--- a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchDao.java
+++ b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchDao.java
@@ -17,8 +17,23 @@
  */
 package org.apache.metron.elasticsearch.dao;
 
+import static org.apache.metron.elasticsearch.utils.ElasticsearchUtils.INDEX_NAME_DELIMITER;
+
 import com.google.common.base.Splitter;
 import com.google.common.collect.Iterables;
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Function;
 import org.apache.metron.elasticsearch.utils.ElasticsearchUtils;
 import org.apache.metron.indexing.dao.AccessConfig;
 import org.apache.metron.indexing.dao.IndexDao;
@@ -65,22 +80,6 @@ import org.elasticsearch.search.sort.FieldSortBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.function.Function;
-
-import static org.apache.metron.elasticsearch.utils.ElasticsearchUtils.INDEX_NAME_DELIMITER;
-
 public class ElasticsearchDao implements IndexDao {
 
   private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -391,7 +390,7 @@ public class ElasticsearchDao implements IndexDao {
   @Override
   public synchronized void init(AccessConfig config) {
     if(this.client == null) {
-      this.client = ElasticsearchUtils.getClient(config.getGlobalConfigSupplier().get(), config.getOptionalSettings());
+      this.client = ElasticsearchUtils.getClient(config.getGlobalConfigSupplier().get());
       this.accessConfig = config;
       this.columnMetadataDao = new ElasticsearchColumnMetadataDao(this.client.admin());
       this.requestSubmitter = new ElasticsearchRequestSubmitter(this.client);

http://git-wip-us.apache.org/repos/asf/metron/blob/a8b555dc/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/utils/ElasticsearchUtils.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/utils/ElasticsearchUtils.java b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/utils/ElasticsearchUtils.java
index 4b73b84..24f7a27 100644
--- a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/utils/ElasticsearchUtils.java
+++ b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/utils/ElasticsearchUtils.java
@@ -22,6 +22,7 @@ import static java.lang.String.format;
 import com.google.common.base.Splitter;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Iterables;
+import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
@@ -30,25 +31,34 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.Set;
 import org.apache.commons.lang.StringUtils;
 import org.apache.metron.common.configuration.writer.WriterConfiguration;
+import org.apache.metron.common.utils.HDFSUtils;
+import org.apache.metron.common.utils.ReflectionUtils;
 import org.apache.metron.netty.utils.NettyRuntimeWrapper;
+import org.apache.metron.stellar.common.utils.ConversionUtils;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.elasticsearch.client.transport.TransportClient;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.InetSocketTransportAddress;
 import org.elasticsearch.common.xcontent.XContentHelper;
-import org.elasticsearch.transport.client.PreBuiltTransportClient;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class ElasticsearchUtils {
 
   private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private static final String ES_CLIENT_CLASS_DEFAULT = "org.elasticsearch.transport.client.PreBuiltTransportClient";
+  private static final String PWD_FILE_CONFIG_KEY = "es.xpack.password.file";
+  private static final String USERNAME_CONFIG_KEY = "es.xpack.username";
+  private static final String TRANSPORT_CLIENT_USER_KEY = "xpack.security.user";
+
 
   private static ThreadLocal<Map<String, SimpleDateFormat>> DATE_FORMAT_CACHE
           = ThreadLocal.withInitial(() -> new HashMap<>());
@@ -107,32 +117,103 @@ public class ElasticsearchUtils {
     return parts[0];
   }
 
-  public static TransportClient getClient(Map<String, Object> globalConfiguration, Map<String, String> optionalSettings) {
+  /**
+   * Instantiates an Elasticsearch client based on es.client.class, if set. Defaults to
+   * org.elasticsearch.transport.client.PreBuiltTransportClient.
+   *
+   * @param globalConfiguration Metron global config
+   * @return an Elasticsearch transport client built from the global config
+   */
+  public static TransportClient getClient(Map<String, Object> globalConfiguration) {
+    Set<String> customESSettings = new HashSet<>();
+    customESSettings.addAll(Arrays.asList("es.client.class", USERNAME_CONFIG_KEY, PWD_FILE_CONFIG_KEY));
     Settings.Builder settingsBuilder = Settings.builder();
-    settingsBuilder.put("cluster.name", globalConfiguration.get("es.clustername"));
-    settingsBuilder.put("client.transport.ping_timeout","500s");
-    if (optionalSettings != null) {
-      settingsBuilder.put(optionalSettings);
+    Map<String, String> esSettings = getEsSettings(globalConfiguration);
+    for (Map.Entry<String, String> entry : esSettings.entrySet()) {
+      String key = entry.getKey();
+      String value = entry.getValue();
+      if (!customESSettings.contains(key)) {
+        settingsBuilder.put(key, value);
+      }
     }
-    Settings settings = settingsBuilder.build();
-    TransportClient client;
-    try{
+    settingsBuilder.put("cluster.name", globalConfiguration.get("es.clustername"));
+    settingsBuilder.put("client.transport.ping_timeout", esSettings.getOrDefault("client.transport.ping_timeout","500s"));
+    setXPackSecurityOrNone(settingsBuilder, esSettings);
+
+    try {
       LOG.info("Number of available processors in Netty: {}", NettyRuntimeWrapper.availableProcessors());
       // Netty sets available processors statically and if an attempt is made to set it more than
       // once an IllegalStateException is thrown by NettyRuntime.setAvailableProcessors(NettyRuntime.java:87)
       // https://discuss.elastic.co/t/getting-availableprocessors-is-already-set-to-1-rejecting-1-illegalstateexception-exception/103082
       // https://discuss.elastic.co/t/elasticsearch-5-4-1-availableprocessors-is-already-set/88036
       System.setProperty("es.set.netty.runtime.available.processors", "false");
-      client = new PreBuiltTransportClient(settings);
-      for(HostnamePort hp : getIps(globalConfiguration)) {
+      TransportClient client = createTransportClient(settingsBuilder.build(), esSettings);
+      for (HostnamePort hp : getIps(globalConfiguration)) {
         client.addTransportAddress(
                 new InetSocketTransportAddress(InetAddress.getByName(hp.hostname), hp.port)
         );
       }
-    } catch (UnknownHostException exception){
+      return client;
+    } catch (UnknownHostException exception) {
       throw new RuntimeException(exception);
     }
-    return client;
+  }
+
+  private static Map<String, String> getEsSettings(Map<String, Object> config) {
+    return ConversionUtils
+        .convertMap((Map<String, Object>) config.getOrDefault("es.client.settings", new HashMap<String, Object>()),
+            String.class);
+  }
+
+  /*
+   * Append Xpack security settings (if any)
+   */
+  private static void setXPackSecurityOrNone(Settings.Builder settingsBuilder, Map<String, String> esSettings) {
+
+    if (esSettings.containsKey(PWD_FILE_CONFIG_KEY)) {
+
+      if (!esSettings.containsKey(USERNAME_CONFIG_KEY) || StringUtils.isEmpty(esSettings.get(USERNAME_CONFIG_KEY))) {
+        throw new IllegalArgumentException("X-pack username is required and cannot be empty");
+      }
+
+      settingsBuilder.put(
+         TRANSPORT_CLIENT_USER_KEY,
+         esSettings.get(USERNAME_CONFIG_KEY) + ":" + getPasswordFromFile(esSettings.get(PWD_FILE_CONFIG_KEY))
+      );
+    }
+  }
+
+  /*
+   * Single password on first line
+   */
+  private static String getPasswordFromFile(String hdfsPath) {
+    List<String> lines = null;
+    try {
+      lines = HDFSUtils.readFile(hdfsPath);
+    } catch (IOException e) {
+      throw new IllegalArgumentException(
+          format("Unable to read XPack password file from HDFS location '%s'", hdfsPath), e);
+    }
+    if (lines.size() == 0) {
+      throw new IllegalArgumentException(format("No password found in file '%s'", hdfsPath));
+    }
+    return lines.get(0);
+  }
+
+  /**
+   * Constructs an ES transport client from the provided client settings and additional ES config.
+   *
+   * @param settings client settings
+   * @param esSettings additional ES config, including the client class to instantiate
+   * @return client with the provided settings
+   */
+  private static TransportClient createTransportClient(Settings settings,
+      Map<String, String> esSettings) {
+    String esClientClassName = (String) esSettings
+        .getOrDefault("es.client.class", ES_CLIENT_CLASS_DEFAULT);
+    return ReflectionUtils
+        .createInstance(esClientClassName, new Class[]{Settings.class, Class[].class},
+            new Object[]{settings, new Class[0]});
   }
 
   public static class HostnamePort {
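
Note on the reflective call above: any custom es.client.class must expose a constructor matching (Settings, Class[]), since that is the exact shape createTransportClient looks up; PreBuiltTransportClient's (Settings, Class<? extends Plugin>...) varargs constructor erases to that shape, which is why it works as the default. A hypothetical custom client satisfying the contract could be as small as:

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.plugins.Plugin;
    import org.elasticsearch.transport.client.PreBuiltTransportClient;

    // hypothetical client, wired in via "es.client.class" under es.client.settings
    public class AuditingTransportClient extends PreBuiltTransportClient {

      @SafeVarargs
      public AuditingTransportClient(Settings settings, Class<? extends Plugin>... plugins) {
        super(settings, plugins);
        // custom behavior would go here, e.g. logging the effective settings
      }
    }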

http://git-wip-us.apache.org/repos/asf/metron/blob/a8b555dc/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/writer/ElasticsearchWriter.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/writer/ElasticsearchWriter.java b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/writer/ElasticsearchWriter.java
index 143bcf7..5959623 100644
--- a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/writer/ElasticsearchWriter.java
+++ b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/writer/ElasticsearchWriter.java
@@ -42,21 +42,15 @@ import org.slf4j.LoggerFactory;
 
 public class ElasticsearchWriter implements BulkMessageWriter<JSONObject>, Serializable {
 
-  private Map<String, String> optionalSettings;
   private transient TransportClient client;
   private SimpleDateFormat dateFormat;
   private static final Logger LOG = LoggerFactory.getLogger(ElasticsearchWriter.class);
   private FieldNameConverter fieldNameConverter = new ElasticsearchFieldNameConverter();
 
-  public ElasticsearchWriter withOptionalSettings(Map<String, String> optionalSettings) {
-    this.optionalSettings = optionalSettings;
-    return this;
-  }
-
   @Override
   public void init(Map stormConf, TopologyContext topologyContext, WriterConfiguration configurations) {
     Map<String, Object> globalConfiguration = configurations.getGlobalConfig();
-    client = ElasticsearchUtils.getClient(globalConfiguration, optionalSettings);
+    client = ElasticsearchUtils.getClient(globalConfiguration);
     dateFormat = ElasticsearchUtils.getIndexFormat(globalConfiguration);
   }
 

http://git-wip-us.apache.org/repos/asf/metron/blob/a8b555dc/metron-platform/metron-elasticsearch/src/main/scripts/start_elasticsearch_topology.sh
----------------------------------------------------------------------
diff --git a/metron-platform/metron-elasticsearch/src/main/scripts/start_elasticsearch_topology.sh b/metron-platform/metron-elasticsearch/src/main/scripts/start_elasticsearch_topology.sh
index 1b473e9..20ce23f 100755
--- a/metron-platform/metron-elasticsearch/src/main/scripts/start_elasticsearch_topology.sh
+++ b/metron-platform/metron-elasticsearch/src/main/scripts/start_elasticsearch_topology.sh
@@ -19,4 +19,10 @@
 METRON_VERSION=${project.version}
 METRON_HOME=/usr/metron/$METRON_VERSION
 TOPOLOGY_JAR=${project.artifactId}-$METRON_VERSION-uber.jar
-storm jar $METRON_HOME/lib/$TOPOLOGY_JAR org.apache.storm.flux.Flux --remote $METRON_HOME/flux/indexing/random_access/remote.yaml --filter $METRON_HOME/config/elasticsearch.properties
+INDEXING_CONTRIB=${INDEXING_CONTRIB:-$METRON_HOME/indexing_contrib}
+if [ -d "$INDEXING_CONTRIB" ]; then
+  export EXTRA_JARS=$(ls -m $INDEXING_CONTRIB/*.jar | tr -d ' ' | tr -d '\n' | sed 's/\/\//\//g')
+  storm jar $METRON_HOME/lib/$TOPOLOGY_JAR org.apache.storm.flux.Flux --remote $METRON_HOME/flux/indexing/random_access/remote.yaml --filter $METRON_HOME/config/elasticsearch.properties --jars "$EXTRA_JARS"
+else
+  storm jar $METRON_HOME/lib/$TOPOLOGY_JAR org.apache.storm.flux.Flux --remote $METRON_HOME/flux/indexing/random_access/remote.yaml --filter $METRON_HOME/config/elasticsearch.properties
+fi

http://git-wip-us.apache.org/repos/asf/metron/blob/a8b555dc/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/writer/ElasticsearchWriterTest.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/writer/ElasticsearchWriterTest.java b/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/writer/ElasticsearchWriterTest.java
index 9aff560..6a3638b 100644
--- a/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/writer/ElasticsearchWriterTest.java
+++ b/metron-platform/metron-elasticsearch/src/test/java/org/apache/metron/elasticsearch/writer/ElasticsearchWriterTest.java
@@ -18,23 +18,20 @@
 
 package org.apache.metron.elasticsearch.writer;
 
-import org.apache.storm.tuple.Tuple;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
 import com.google.common.collect.ImmutableList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
 import org.apache.metron.common.writer.BulkWriterResponse;
+import org.apache.storm.tuple.Tuple;
 import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.junit.Test;
 
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-
-import static org.junit.Assert.*;
-
-
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
 public class ElasticsearchWriterTest {
     @Test
     public void testSingleSuccesses() throws Exception {

http://git-wip-us.apache.org/repos/asf/metron/blob/a8b555dc/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/utils/ConversionUtils.java
----------------------------------------------------------------------
diff --git a/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/utils/ConversionUtils.java b/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/utils/ConversionUtils.java
index b53097f..783afae 100644
--- a/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/utils/ConversionUtils.java
+++ b/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/utils/ConversionUtils.java
@@ -19,11 +19,12 @@
 package org.apache.metron.stellar.common.utils;
 
 import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import java.util.List;
+import java.util.Map;
 import org.apache.commons.beanutils.BeanUtilsBean2;
 import org.apache.commons.beanutils.ConvertUtilsBean;
 
-import java.util.List;
-
 public class ConversionUtils {
   private static ThreadLocal<ConvertUtilsBean> UTILS_BEAN = new ThreadLocal<ConvertUtilsBean>() {
     @Override
@@ -55,4 +56,18 @@ public class ConversionUtils {
     return Lists.transform(from, s -> convert(s, clazz));
   }
 
+  /**
+   * Performs naive Map type conversion on values. Key types remain unchanged.
+   *
+   * @param from Source map
+   * @param clazz Class type to cast the Map values to
+   * @param <K> Map key type
+   * @param <V1> Source value type
+   * @param <V2> Desired value type
+   * @return New Map with the values cast to the desired type
+   */
+  public static <K, V1, V2> Map<K, V2> convertMap(Map<K, V1> from, Class<V2> clazz) {
+    return Maps.transformValues(from, s -> convert(s, clazz));
+  }
+
 }
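
A quick usage sketch of the new convertMap helper (the keys and values here are illustrative):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.metron.stellar.common.utils.ConversionUtils;

    public class ConvertMapExample {
      public static void main(String[] args) {
        Map<String, Object> raw = new HashMap<>();
        raw.put("client.transport.ping_timeout", "500s");
        raw.put("request.retries", 3);

        // values are converted lazily via Maps.transformValues; keys are untouched
        Map<String, String> settings = ConversionUtils.convertMap(raw, String.class);
        System.out.println(settings.get("request.retries")); // prints 3
      }
    }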


[44/50] [abbrv] metron git commit: METRON-1528: Fix missing file in metron.spec (mmiklavc via mmiklavc) closes apache/metron#996

Posted by rm...@apache.org.
METRON-1528: Fix missing file in metron.spec (mmiklavc via mmiklavc) closes apache/metron#996


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/a41611b1
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/a41611b1
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/a41611b1

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: a41611b1ae2bb17fa9333ef6f965749652e95538
Parents: e094914
Author: mmiklavc <mi...@gmail.com>
Authored: Wed Apr 18 07:35:01 2018 -0600
Committer: Michael Miklavcic <mi...@gmail.com>
Committed: Wed Apr 18 07:35:01 2018 -0600

----------------------------------------------------------------------
 metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/a41611b1/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec b/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec
index 1f40105..15469d9 100644
--- a/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec
+++ b/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec
@@ -123,6 +123,7 @@ This package installs the Metron common files %{metron_home}
 %dir %{metron_home}/lib
 %{metron_home}/bin/zk_load_configs.sh
 %{metron_home}/bin/stellar
+%{metron_home}/bin/cluster_info.py
 %{metron_home}/config/zookeeper/global.json
 %attr(0644,root,root) %{metron_home}/lib/metron-common-%{full_version}.jar
 


[20/50] [abbrv] metron git commit: METRON-1462: Separate ES and Kibana from Metron Mpack (mmiklavc via mmiklavc) closes apache/metron#943

Posted by rm...@apache.org.
http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/dashboard/dashboardindex.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/dashboard/dashboardindex.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/dashboard/dashboardindex.py
deleted file mode 100755
index f0903ac..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/dashboard/dashboardindex.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/python
-#
-#  Licensed to the Apache Software Foundation (ASF) under one or more
-#  contributor license agreements.  See the NOTICE file distributed with
-#  this work for additional information regarding copyright ownership.
-#  The ASF licenses this file to You under the Apache License, Version 2.0
-#  (the "License"); you may not use this file except in compliance with
-#  the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#
-
-from elasticsearch import Elasticsearch
-from elasticsearch.helpers import bulk
-import cPickle as pickle
-import argparse, sys, os.path
-import errno
-import os
-
-
-class DashboardIndex(object):
-
-    def __init__(self, host='localhost', port=9200, url_prefix='', timeout=10, **kwargs):
-        """
-        :arg host: hostname of the node (default: localhost)
-        :arg port: port to use (integer, default: 9200)
-        :arg url_prefix: optional url prefix for elasticsearch
-        :arg timeout: default timeout in seconds (float, default: 10)
-        """
-        self.es = Elasticsearch([{'host':host,'port': port, 'url_prefix': url_prefix, 'timeout':timeout}])
-
-    def get(self):
-        """
-        Get .kibana index from Elasticsearch
-        """
-        dotkibana = self.es.search(index='.kibana', size = 100)
-        return dotkibana['hits']['hits']
-
-    def load(self,filespec):
-        """
-        Load Index data from local filesystem
-        :args filespec: path/filename of saved file
-        """
-        data=[]
-        with open(filespec,'rb') as fp:
-            data = pickle.load(fp)
-        return data
-
-    def save(self,filename,data):
-        """
-        Save Index data on local filesystem
-        :args filename: path/filename for saved file
-        """
-        with open(filename,'wb') as fp:
-            pickle.dump(data,fp)
-
-    def put(self,data):
-        """
-        Bulk write data to Elasticsearch
-        :args data: data to be written (note: index name is specified in data)
-        """
-        bulk(self.es,data)
-
-    def main(self,args):
-
-        if args.save:
-            print("running save with host:%s on port %d, filespec: %s" % (args.hostname, args.port, args.filespec))
-            self.save(filename=args.filespec,data=di.get())
-        else:
-            """
-            Loads Kibana Dashboard definition from disk and replaces .kibana on index
-            :args filespec: path/filename for saved file
-            """
-            if not os.path.isfile(args.filespec):
-                raise IOError(
-                    errno.ENOENT, os.strerror(errno.ENOENT), args.filespec)
-            self.es.indices.delete(index='.kibana', ignore=[400, 404])
-            self.put(data=di.load(filespec=args.filespec))
-
-if __name__ == '__main__':
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument("hostname", help="ES Hostname or IP", type=str)
-    parser.add_argument("port", help="ES Port", type=int)
-    parser.add_argument("filespec", help="file to be pushed from or saved to", type=str)
-    parser.add_argument("-s","--save", help="run in SAVE mode - .kibana will be read and saved to filespec",action="store_true")
-    args = parser.parse_args()
-    di = DashboardIndex(host=args.hostname,port=args.port)
-    di.main(args)

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/dashboard/kibana.template
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/dashboard/kibana.template b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/dashboard/kibana.template
deleted file mode 100644
index 6f38ed5..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/dashboard/kibana.template
+++ /dev/null
@@ -1,233 +0,0 @@
-{
-  "template" : ".kibana",
-    "mappings" : {
-      "search" : {
-        "dynamic" : "strict",
-        "properties" : {
-          "columns" : {
-            "type" : "keyword"
-          },
-          "description" : {
-            "type" : "text"
-          },
-          "hits" : {
-            "type" : "integer"
-          },
-          "kibanaSavedObjectMeta" : {
-            "properties" : {
-              "searchSourceJSON" : {
-                "type" : "text"
-              }
-            }
-          },
-          "sort" : {
-            "type" : "keyword"
-          },
-          "title" : {
-            "type" : "text"
-          },
-          "version" : {
-            "type" : "integer"
-          }
-        }
-      },
-      "url" : {
-        "dynamic" : "strict",
-        "properties" : {
-          "accessCount" : {
-            "type" : "long"
-          },
-          "accessDate" : {
-            "type" : "date"
-          },
-          "createDate" : {
-            "type" : "date"
-          },
-          "url" : {
-            "type" : "text",
-            "fields" : {
-              "keyword" : {
-                "type" : "keyword",
-                "ignore_above" : 2048
-              }
-            }
-          }
-        }
-      },
-      "dashboard" : {
-        "dynamic" : "strict",
-        "properties" : {
-          "description" : {
-            "type" : "text"
-          },
-          "hits" : {
-            "type" : "integer"
-          },
-          "kibanaSavedObjectMeta" : {
-            "properties" : {
-              "searchSourceJSON" : {
-                "type" : "text"
-              }
-            }
-          },
-          "optionsJSON" : {
-            "type" : "text"
-          },
-          "panelsJSON" : {
-            "type" : "text"
-          },
-          "refreshInterval" : {
-            "properties" : {
-              "display" : {
-                "type" : "keyword"
-              },
-              "pause" : {
-                "type" : "boolean"
-              },
-              "section" : {
-                "type" : "integer"
-              },
-              "value" : {
-                "type" : "integer"
-              }
-            }
-          },
-          "timeFrom" : {
-            "type" : "keyword"
-          },
-          "timeRestore" : {
-            "type" : "boolean"
-          },
-          "timeTo" : {
-            "type" : "keyword"
-          },
-          "title" : {
-            "type" : "text"
-          },
-          "uiStateJSON" : {
-            "type" : "text"
-          },
-          "version" : {
-            "type" : "integer"
-          }
-        }
-      },
-      "index-pattern" : {
-        "dynamic" : "strict",
-        "properties" : {
-          "fieldFormatMap" : {
-            "type" : "text"
-          },
-          "fields" : {
-            "type" : "text"
-          },
-          "intervalName" : {
-            "type" : "keyword"
-          },
-          "notExpandable" : {
-            "type" : "boolean"
-          },
-          "sourceFilters" : {
-            "type" : "text"
-          },
-          "timeFieldName" : {
-            "type" : "keyword"
-          },
-          "title" : {
-            "type" : "text"
-          }
-        }
-      },
-      "timelion-sheet" : {
-        "dynamic" : "strict",
-        "properties" : {
-          "description" : {
-            "type" : "text"
-          },
-          "hits" : {
-            "type" : "integer"
-          },
-          "kibanaSavedObjectMeta" : {
-            "properties" : {
-              "searchSourceJSON" : {
-                "type" : "text"
-              }
-            }
-          },
-          "timelion_chart_height" : {
-            "type" : "integer"
-          },
-          "timelion_columns" : {
-            "type" : "integer"
-          },
-          "timelion_interval" : {
-            "type" : "keyword"
-          },
-          "timelion_other_interval" : {
-            "type" : "keyword"
-          },
-          "timelion_rows" : {
-            "type" : "integer"
-          },
-          "timelion_sheet" : {
-            "type" : "text"
-          },
-          "title" : {
-            "type" : "text"
-          },
-          "version" : {
-            "type" : "integer"
-          }
-        }
-      },
-      "visualization" : {
-        "dynamic" : "strict",
-        "properties" : {
-          "description" : {
-            "type" : "text"
-          },
-          "kibanaSavedObjectMeta" : {
-            "properties" : {
-              "searchSourceJSON" : {
-                "type" : "text"
-              }
-            }
-          },
-          "savedSearchId" : {
-            "type" : "keyword"
-          },
-          "title" : {
-            "type" : "text"
-          },
-          "uiStateJSON" : {
-            "type" : "text"
-          },
-          "version" : {
-            "type" : "integer"
-          },
-          "visState" : {
-            "type" : "text"
-          }
-        }
-      },
-      "server" : {
-        "dynamic" : "strict",
-        "properties" : {
-          "uuid" : {
-            "type" : "keyword"
-          }
-        }
-      },
-      "_default_" : {
-        "dynamic" : "strict"
-      },
-      "config" : {
-        "dynamic" : "true",
-        "properties" : {
-          "buildNum" : {
-            "type" : "keyword"
-          }
-        }
-      }
-    }
-}

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/kibana_master.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/kibana_master.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/kibana_master.py
deleted file mode 100755
index a15f709..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/kibana_master.py
+++ /dev/null
@@ -1,119 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import errno
-import os
-
-from ambari_commons.os_check import OSCheck
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Directory
-from resource_management.core.resources.system import Execute
-from resource_management.core.resources.system import File
-from resource_management.core.source import InlineTemplate
-from resource_management.libraries.functions.format import format as ambari_format
-from resource_management.libraries.script import Script
-
-from common import service_check
-
-class Kibana(Script):
-
-    def install(self, env):
-        import params
-        env.set_params(params)
-        Logger.info("Installing Kibana")
-        self.install_packages(env)
-
-    def configure(self, env, upgrade_type=None, config_dir=None):
-        import params
-        env.set_params(params)
-        Logger.info("Configuring Kibana")
-
-        directories = [params.log_dir, params.pid_dir, params.conf_dir]
-        Directory(directories,
-                  mode=0755,
-                  owner=params.kibana_user,
-                  group=params.kibana_user
-                  )
-
-        File("{0}/kibana.yml".format(params.conf_dir),
-             owner=params.kibana_user,
-             content=InlineTemplate(params.kibana_yml_template)
-             )
-
-    def stop(self, env, upgrade_type=None):
-        import params
-        env.set_params(params)
-        Logger.info("Stopping Kibana")
-        Execute("service kibana stop")
-
-    def start(self, env, upgrade_type=None):
-        import params
-        env.set_params(params)
-        self.configure(env)
-        Logger.info("Starting Kibana")
-        Execute("service kibana start")
-
-    def restart(self, env):
-        import params
-        env.set_params(params)
-        self.configure(env)
-        Logger.info("Restarting Kibana")
-        Execute("service kibana restart")
-
-    def status(self, env):
-        import params
-        env.set_params(params)
-        Logger.info('Status check Kibana')
-        service_check("service kibana status", user=params.kibana_user, label="Kibana")
-
-    @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-    def load_template(self, env):
-        import params
-        env.set_params(params)
-
-        hostname = ambari_format("{es_host}")
-        port = int(ambari_format("{es_port}"))
-
-        Logger.info("Connecting to Elasticsearch on host: %s, port: %s" % (hostname, port))
-
-        kibanaTemplate = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dashboard', 'kibana.template')
-        if not os.path.isfile(kibanaTemplate):
-          raise IOError(
-              errno.ENOENT, os.strerror(errno.ENOENT), kibanaTemplate)
-
-        Logger.info("Loading .kibana index template from %s" % kibanaTemplate)
-        template_cmd = ambari_format(
-            'curl -s -XPOST http://{es_host}:{es_port}/_template/.kibana -d @%s' % kibanaTemplate)
-        Execute(template_cmd, logoutput=True)
-
-        kibanaDashboardLoad = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dashboard', 'dashboard-bulkload.json')
-        if not os.path.isfile(kibanaDashboardLoad):
-          raise IOError(
-              errno.ENOENT, os.strerror(errno.ENOENT), kibanaDashboardLoad)
-
-        Logger.info("Loading .kibana dashboard from %s" % kibanaDashboardLoad)
-
-        kibana_cmd = ambari_format(
-            'curl -s -H "Content-Type: application/x-ndjson" -XPOST http://{es_host}:{es_port}/.kibana/_bulk --data-binary @%s' % kibanaDashboardLoad)
-        Execute(kibana_cmd, logoutput=True)
-
-
-if __name__ == "__main__":
-    Kibana().execute()

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/params.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/params.py
deleted file mode 100755
index ef4cb62..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/params.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Kibana Params configurations
-
-"""
-
-from urlparse import urlparse
-
-from resource_management.libraries.functions import format
-from resource_management.libraries.script import Script
-
-# server configurations
-config = Script.get_config()
-
-kibana_home = '/usr/share/kibana/'
-kibana_bin = '/usr/share/kibana/bin/'
-
-conf_dir = "/etc/kibana"
-kibana_user = config['configurations']['kibana-env']['kibana_user']
-kibana_group = config['configurations']['kibana-env']['kibana_group']
-log_dir = config['configurations']['kibana-env']['kibana_log_dir']
-pid_dir = config['configurations']['kibana-env']['kibana_pid_dir']
-pid_file = format("{pid_dir}/kibanasearch.pid")
-es_url = config['configurations']['kibana-env']['kibana_es_url']
-parsed = urlparse(es_url)
-es_host = parsed.netloc.split(':')[0]
-es_port = parsed.netloc.split(':')[1]
-kibana_port = config['configurations']['kibana-env']['kibana_server_port']
-kibana_server_host = config['configurations']['kibana-env']['kibana_server_host']
-kibana_default_application = config['configurations']['kibana-env']['kibana_default_application']
-hostname = config['hostname']
-java64_home = config['hostLevelParams']['java_home']
-kibana_yml_template = config['configurations']['kibana-site']['content']
-

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/quicklinks/quicklinks.json b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/quicklinks/quicklinks.json
deleted file mode 100755
index 448e102..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/quicklinks/quicklinks.json
+++ /dev/null
@@ -1,28 +0,0 @@
-{
-  "name": "default",
-  "description": "default quick links configuration",
-  "configuration": {
-    "protocol":
-    {
-      "type":"HTTP_ONLY"
-    },
-
-    "links": [
-      {
-        "name": "metron_ui",
-        "label": "Metron UI",
-        "requires_user_name": "false",
-        "component_name": "KIBANA_MASTER",
-        "url":"%@://%@:%@/",
-        "port":{
-          "http_property": "kibana_server_port",
-          "http_default_port": "5601",
-          "https_property": "kibana_server_port",
-          "https_default_port": "5601",
-          "regex": "^(\\d+)$",
-          "site": "kibana-env"
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/metainfo.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/metainfo.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/metainfo.xml
index 6a20af8..38bd94a 100644
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/metainfo.xml
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/metainfo.xml
@@ -211,6 +211,15 @@
                 </commandScript>
               </customCommand>
               <customCommand>
+                <name>KIBANA_DASHBOARD_INSTALL</name>
+                <background>false</background>
+                <commandScript>
+                  <script>scripts/indexing_master.py</script>
+                  <scriptType>PYTHON</scriptType>
+                  <timeout>600</timeout>
+                </commandScript>
+              </customCommand>
+              <customCommand>
                 <name>ZEPPELIN_NOTEBOOK_IMPORT</name>
                 <commandScript>
                   <script>scripts/indexing_master.py</script>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/dashboard/__init__.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/dashboard/__init__.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/dashboard/__init__.py
new file mode 100755
index 0000000..8d2bad8
--- /dev/null
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/dashboard/__init__.py
@@ -0,0 +1,16 @@
+#
+#  Licensed to the Apache Software Foundation (ASF) under one or more
+#  contributor license agreements.  See the NOTICE file distributed with
+#  this work for additional information regarding copyright ownership.
+#  The ASF licenses this file to You under the Apache License, Version 2.0
+#  (the "License"); you may not use this file except in compliance with
+#  the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
\ No newline at end of file


[02/50] [abbrv] metron git commit: METRON-1488: user_settings hbase table does not have acls set up for kerberos closes apache/metron#962

Posted by rm...@apache.org.
METRON-1488: user_settings hbase table does not have acls set up for kerberos closes apache/metron#962


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/26c5d306
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/26c5d306
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/26c5d306

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 26c5d3065bd7a635699083e8131d4a7629bf3b4d
Parents: 37662d3
Author: cstella <ce...@gmail.com>
Authored: Thu Mar 15 12:06:56 2018 -0400
Committer: cstella <ce...@gmail.com>
Committed: Thu Mar 15 12:06:56 2018 -0400

----------------------------------------------------------------------
 .../common-services/METRON/CURRENT/package/scripts/rest_master.py  | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/26c5d306/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/rest_master.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/rest_master.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/rest_master.py
index 36a68c6..2f2d3f9 100755
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/rest_master.py
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/rest_master.py
@@ -51,6 +51,8 @@ class RestMaster(Script):
             commands.init_kafka_topics()
         if not commands.is_hbase_configured():
             commands.create_hbase_tables()
+        if params.security_enabled and not commands.is_hbase_acl_configured():
+            commands.set_hbase_acls()
         if params.security_enabled and not commands.is_kafka_acl_configured():
             commands.init_kafka_acls()
             commands.set_kafka_acl_configured()


[26/50] [abbrv] metron git commit: METRON-1505 Intermittent Profiler Integration Test Failure (nickwallen) closes apache/metron#977

Posted by rm...@apache.org.
METRON-1505 Intermittent Profiler Integration Test Failure (nickwallen) closes apache/metron#977


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/46bc63db
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/46bc63db
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/46bc63db

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 46bc63dbcfe9f0ddabfd4821958962a2dac9378e
Parents: ab4f8e6
Author: nickwallen <ni...@nickallen.org>
Authored: Sat Apr 7 11:28:01 2018 -0400
Committer: nickallen <ni...@apache.org>
Committed: Sat Apr 7 11:28:01 2018 -0400

----------------------------------------------------------------------
 .../profiler/DefaultMessageDistributor.java     |  54 +++-
 .../src/main/flux/profiler/remote.yaml          |   2 +
 .../profiler/bolt/ProfileBuilderBolt.java       | 149 +++++++---
 .../profiler/bolt/ProfileSplitterBolt.java      |   1 -
 .../config/zookeeper/percentiles/profiler.json  |  12 -
 .../processing-time-test/profiler.json          |  11 +
 .../zookeeper/readme-example-1/profiler.json    |  17 --
 .../zookeeper/readme-example-2/profiler.json    |  18 --
 .../zookeeper/readme-example-3/profiler.json    |  11 -
 .../zookeeper/readme-example-4/profiler.json    |  11 -
 .../profiler/bolt/ProfileBuilderBoltTest.java   | 130 +++------
 .../integration/ProfilerIntegrationTest.java    | 274 +++++--------------
 .../configuration/profiler/ProfileConfig.java   |  49 ++--
 .../ZKConfigurationsCacheIntegrationTest.java   |   4 +-
 .../org/apache/metron/hbase/bolt/HBaseBolt.java |  22 +-
 15 files changed, 319 insertions(+), 446 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/46bc63db/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/DefaultMessageDistributor.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/DefaultMessageDistributor.java b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/DefaultMessageDistributor.java
index ea5126f..70f4228 100644
--- a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/DefaultMessageDistributor.java
+++ b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/DefaultMessageDistributor.java
@@ -25,6 +25,7 @@ import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.RemovalListener;
 import com.google.common.cache.RemovalNotification;
+import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.metron.common.configuration.profiler.ProfileConfig;
 import org.apache.metron.stellar.dsl.Context;
 import org.json.simple.JSONObject;
@@ -33,7 +34,6 @@ import org.slf4j.LoggerFactory;
 
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.List;
 import java.util.Optional;
 import java.util.concurrent.ExecutionException;
@@ -74,7 +74,7 @@ public class DefaultMessageDistributor implements MessageDistributor {
    * messages.  Once it has not received messages for a period of time, it is
    * moved to the expired cache.
    */
-  private transient Cache<String, ProfileBuilder> activeCache;
+  private transient Cache<Integer, ProfileBuilder> activeCache;
 
   /**
    * A cache of expired profiles.
@@ -85,7 +85,7 @@ public class DefaultMessageDistributor implements MessageDistributor {
    * can flush the state of the expired profile.  If the client does not flush
    * the expired profiles, this state will be lost forever.
    */
-  private transient Cache<String, ProfileBuilder> expiredCache;
+  private transient Cache<Integer, ProfileBuilder> expiredCache;
 
   /**
    * Create a new message distributor.
@@ -222,7 +222,7 @@ public class DefaultMessageDistributor implements MessageDistributor {
    * @param cache The cache to flush.
    * @return The measurements captured when flushing the profiles.
    */
-  private List<ProfileMeasurement> flushCache(Cache<String, ProfileBuilder> cache) {
+  private List<ProfileMeasurement> flushCache(Cache<Integer, ProfileBuilder> cache) {
 
     List<ProfileMeasurement> measurements = new ArrayList<>();
     for(ProfileBuilder profileBuilder: cache.asMap().values()) {
@@ -262,11 +262,19 @@ public class DefaultMessageDistributor implements MessageDistributor {
   /**
    * Builds the key that is used to lookup the {@link ProfileBuilder} within the cache.
    *
+   * <p>The cache key is built using the hash codes of the profile and entity name.  If the profile
+   * definition is ever changed, the same cache entry will not be reused.  This ensures that no
+   * state can be carried over from the old definition into the new, which might result in an
+   * invalid profile measurement.
+   *
    * @param profile The profile definition.
    * @param entity The entity.
    */
-  private String cacheKey(ProfileConfig profile, String entity) {
-    return format("%s:%s", profile, entity);
+  private int cacheKey(ProfileConfig profile, String entity) {
+    return new HashCodeBuilder(17, 37)
+            .append(profile)
+            .append(entity)
+            .hashCode();
   }
 
   public DefaultMessageDistributor withPeriodDurationMillis(long periodDurationMillis) {
@@ -281,29 +289,45 @@ public class DefaultMessageDistributor implements MessageDistributor {
   /**
    * A listener that is notified when profiles expire from the active cache.
    */
-  private class ActiveCacheRemovalListener implements RemovalListener<String, ProfileBuilder> {
+  private class ActiveCacheRemovalListener implements RemovalListener<Integer, ProfileBuilder> {
 
     @Override
-    public void onRemoval(RemovalNotification<String, ProfileBuilder> notification) {
+    public void onRemoval(RemovalNotification<Integer, ProfileBuilder> notification) {
 
-      String key = notification.getKey();
       ProfileBuilder expired = notification.getValue();
+      LOG.warn("Profile expired from active cache; profile={}, entity={}",
+              expired.getDefinition().getProfile(),
+              expired.getEntity());
 
-      LOG.warn("Profile expired from active cache; key={}", key);
-      expiredCache.put(key, expired);
+      // add the profile to the expired cache
+      expiredCache.put(notification.getKey(), expired);
     }
   }
 
   /**
    * A listener that is notified when profiles are removed from the expired cache.
    */
-  private class ExpiredCacheRemovalListener implements RemovalListener<String, ProfileBuilder> {
+  private class ExpiredCacheRemovalListener implements RemovalListener<Integer, ProfileBuilder> {
 
     @Override
-    public void onRemoval(RemovalNotification<String, ProfileBuilder> notification) {
+    public void onRemoval(RemovalNotification<Integer, ProfileBuilder> notification) {
+
+      if(notification.wasEvicted()) {
+
+        // the expired profile was NOT flushed in time
+        ProfileBuilder expired = notification.getValue();
+        LOG.warn("Expired profile NOT flushed before removal, some state lost; profile={}, entity={}",
+                expired.getDefinition().getProfile(),
+                expired.getEntity());
 
-      String key = notification.getKey();
-      LOG.debug("Profile removed from expired cache; key={}", key);
+      } else {
+
+        // the expired profile was flushed successfully
+        ProfileBuilder expired = notification.getValue();
+        LOG.debug("Expired profile successfully flushed; profile={}, entity={}",
+                expired.getDefinition().getProfile(),
+                expired.getEntity());
+      }
     }
   }
 }
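
For context on the key change above, here is a minimal, self-contained sketch
of the hash-based cache key (assuming only a profile-definition type whose
hashCode() covers its full contents, which the ProfileConfig change later in
this commit provides):

    import org.apache.commons.lang.builder.HashCodeBuilder;

    public class CacheKeyExample {

      // Combines the profile definition and entity into a single key.  If the
      // definition changes, its hashCode() changes, so lookups land on a fresh
      // cache entry and no state carries over from the old definition.
      public static int cacheKey(Object profileDefinition, String entity) {
        return new HashCodeBuilder(17, 37)
                .append(profileDefinition)
                .append(entity)
                .toHashCode();
      }

      public static void main(String[] args) {
        System.out.println(cacheKey("definition-v1", "10.0.0.1"));
        System.out.println(cacheKey("definition-v2", "10.0.0.1"));  // different key
      }
    }

Relatedly, Guava's RemovalNotification.wasEvicted() returns true only for
automatic removals, which is what lets the expired-cache listener above warn
only when state was dropped before it could be flushed.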

http://git-wip-us.apache.org/repos/asf/metron/blob/46bc63db/metron-analytics/metron-profiler/src/main/flux/profiler/remote.yaml
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/flux/profiler/remote.yaml b/metron-analytics/metron-profiler/src/main/flux/profiler/remote.yaml
index 83c9fde..6ad007b 100644
--- a/metron-analytics/metron-profiler/src/main/flux/profiler/remote.yaml
+++ b/metron-analytics/metron-profiler/src/main/flux/profiler/remote.yaml
@@ -160,6 +160,8 @@ bolts:
               args: [ref: "windowLag"]
             - name: "withMaxNumberOfRoutes"
               args: [${profiler.max.routes.per.bolt}]
+            - name: "withTimestampField"
+              args: ["timestamp"]
 
     -   id: "hbaseBolt"
         className: "org.apache.metron.hbase.bolt.HBaseBolt"

http://git-wip-us.apache.org/repos/asf/metron/blob/46bc63db/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileBuilderBolt.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileBuilderBolt.java b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileBuilderBolt.java
index ffe823f..fb3d2d0 100644
--- a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileBuilderBolt.java
+++ b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileBuilderBolt.java
@@ -42,13 +42,13 @@ import org.apache.metron.stellar.common.utils.ConversionUtils;
 import org.apache.metron.stellar.dsl.Context;
 import org.apache.metron.zookeeper.SimpleEventListener;
 import org.apache.metron.zookeeper.ZKCache;
-import org.apache.storm.Config;
+import org.apache.storm.StormTimer;
 import org.apache.storm.task.OutputCollector;
 import org.apache.storm.task.TopologyContext;
 import org.apache.storm.topology.OutputFieldsDeclarer;
 import org.apache.storm.topology.base.BaseWindowedBolt;
 import org.apache.storm.tuple.Tuple;
-import org.apache.storm.utils.TupleUtils;
+import org.apache.storm.utils.Utils;
 import org.apache.storm.windowing.TupleWindow;
 import org.json.simple.JSONObject;
 import org.json.simple.parser.JSONParser;
@@ -60,6 +60,8 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+import java.util.function.Supplier;
 
 import static java.lang.String.format;
 import static org.apache.metron.profiler.bolt.ProfileSplitterBolt.ENTITY_TUPLE_FIELD;
@@ -127,6 +129,9 @@ public class ProfileBuilderBolt extends BaseWindowedBolt implements Reloadable {
 
   /**
    * Distributes messages to the profile builders.
+   *
+   * <p>Since expired profiles are flushed on a separate thread, all access to this
+   * {@code MessageDistributor} needs to be protected.
    */
   private MessageDistributor messageDistributor;
 
@@ -145,9 +150,21 @@ public class ProfileBuilderBolt extends BaseWindowedBolt implements Reloadable {
   private List<ProfileMeasurementEmitter> emitters;
 
   /**
-   * Signals when it is time to flush.
+   * Signals when it is time to flush the active profiles.
+   */
+  private FlushSignal activeFlushSignal;
+
+  /**
+   * A timer that flushes expired profiles on a regular interval. The expired profiles
+   * are flushed on a separate thread.
+   *
+   * <p>Flushing expired profiles ensures that any profiles that stop receiving messages
+   * for an extended period of time will continue to be flushed.
+   *
+   * <p>This introduces concurrency issues, as the bolt is no longer single-threaded. Due
+   * to this, all access to the {@code MessageDistributor} needs to be protected.
    */
-  private FlushSignal flushSignal;
+  private StormTimer expiredFlushTimer;
 
   public ProfileBuilderBolt() {
     this.emitters = new ArrayList<>();
@@ -183,16 +200,26 @@ public class ProfileBuilderBolt extends BaseWindowedBolt implements Reloadable {
     this.parser = new JSONParser();
     this.messageDistributor = new DefaultMessageDistributor(periodDurationMillis, profileTimeToLiveMillis, maxNumberOfRoutes);
     this.configurations = new ProfilerConfigurations();
-    this.flushSignal = new FixedFrequencyFlushSignal(periodDurationMillis);
+    this.activeFlushSignal = new FixedFrequencyFlushSignal(periodDurationMillis);
     setupZookeeper();
+    startExpiredFlushTimer();
   }
 
   @Override
   public void cleanup() {
-    zookeeperCache.close();
-    zookeeperClient.close();
+    try {
+      zookeeperCache.close();
+      zookeeperClient.close();
+      expiredFlushTimer.close();
+
+    } catch(Throwable e) {
+      LOG.error("Exception when cleaning up", e);
+    }
   }
 
+  /**
+   * Sets up connectivity to Zookeeper, which provides the necessary configuration for the bolt.
+   */
   private void setupZookeeper() {
     try {
       if (zookeeperClient == null) {
@@ -248,18 +275,6 @@ public class ProfileBuilderBolt extends BaseWindowedBolt implements Reloadable {
     emitters.forEach(emitter -> emitter.declareOutputFields(declarer));
   }
 
-  /**
-   * Defines the frequency at which the bolt will receive tick tuples.  Tick tuples are
-   * used to control how often a profile is flushed.
-   */
-  @Override
-  public Map<String, Object> getComponentConfiguration() {
-
-    Map<String, Object> conf = super.getComponentConfiguration();
-    conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, TimeUnit.MILLISECONDS.toSeconds(profileTimeToLiveMillis));
-    return conf;
-  }
-
   private Context getStellarContext() {
 
     Map<String, Object> global = getConfigurations().getGlobalConfig();
@@ -282,24 +297,12 @@ public class ProfileBuilderBolt extends BaseWindowedBolt implements Reloadable {
 
       // handle each tuple in the window
       for(Tuple tuple : window.get()) {
-
-        if(TupleUtils.isTick(tuple)) {
-          handleTick();
-
-        } else {
-          handleMessage(tuple);
-        }
+        handleMessage(tuple);
       }
 
-      // time to flush?
-      if(flushSignal.isTimeToFlush()) {
-        flushSignal.reset();
-
-        // flush the active profiles
-        List<ProfileMeasurement> measurements = messageDistributor.flush();
-        emitMeasurements(measurements);
-
-        LOG.debug("Flushed active profiles and found {} measurement(s).", measurements.size());
+      // time to flush active profiles?
+      if(activeFlushSignal.isTimeToFlush()) {
+        flushActive();
       }
 
     } catch (Throwable e) {
@@ -310,17 +313,37 @@ public class ProfileBuilderBolt extends BaseWindowedBolt implements Reloadable {
   }
 
   /**
-   * Flush all expired profiles when a 'tick' is received.
+   * Flush all active profiles.
+   */
+  protected void flushActive() {
+    activeFlushSignal.reset();
+
+    // flush the active profiles
+    List<ProfileMeasurement> measurements;
+    synchronized(messageDistributor) {
+      measurements = messageDistributor.flush();
+      emitMeasurements(measurements);
+    }
+
+    LOG.debug("Flushed active profiles and found {} measurement(s).", measurements.size());
+
+  }
+
+  /**
+   * Flushes all expired profiles.
    *
-   * If a profile has not received a message for an extended period of time then it is
+   * <p>If a profile has not received a message for an extended period of time, then it is
    * marked as expired.  Periodically we need to flush these expired profiles to ensure
    * that their state is not lost.
    */
-  private void handleTick() {
+  protected void flushExpired() {
 
     // flush the expired profiles
-    List<ProfileMeasurement> measurements = messageDistributor.flushExpired();
-    emitMeasurements(measurements);
+    List<ProfileMeasurement> measurements;
+    synchronized (messageDistributor) {
+      measurements = messageDistributor.flushExpired();
+      emitMeasurements(measurements);
+    }
 
     LOG.debug("Flushed expired profiles and found {} measurement(s).", measurements.size());
   }
@@ -339,11 +362,13 @@ public class ProfileBuilderBolt extends BaseWindowedBolt implements Reloadable {
     Long timestamp = getField(TIMESTAMP_TUPLE_FIELD, input, Long.class);
 
     // keep track of time
-    flushSignal.update(timestamp);
+    activeFlushSignal.update(timestamp);
     
     // distribute the message
     MessageRoute route = new MessageRoute(definition, entity);
-    messageDistributor.distribute(message, timestamp, route, getStellarContext());
+    synchronized (messageDistributor) {
+      messageDistributor.distribute(message, timestamp, route, getStellarContext());
+    }
 
     LOG.debug("Message distributed: profile={}, entity={}, timestamp={}", definition.getProfile(), entity, timestamp);
   }
@@ -395,10 +420,46 @@ public class ProfileBuilderBolt extends BaseWindowedBolt implements Reloadable {
     return value;
   }
 
+  /**
+   * Converts milliseconds to seconds and handles an ugly cast.
+   *
+   * @param millis Duration in milliseconds.
+   * @return Duration in seconds.
+   */
+  private int toSeconds(long millis) {
+    return (int) TimeUnit.MILLISECONDS.toSeconds(millis);
+  }
+
+  /**
+   * Creates a timer that regularly flushes expired profiles on a separate thread.
+   */
+  private void startExpiredFlushTimer() {
+
+    expiredFlushTimer = createTimer("flush-expired-profiles-timer");
+    expiredFlushTimer.scheduleRecurring(0, toSeconds(profileTimeToLiveMillis), () -> flushExpired());
+  }
+
+  /**
+   * Creates a timer that can execute a task on a fixed interval.
+   *
+   * <p>If the timer encounters an exception, the entire process will be killed.
+   *
+   * @param name The name of the timer.
+   * @return The timer.
+   */
+  private StormTimer createTimer(String name) {
+
+    return new StormTimer(name, (thread, exception) -> {
+      String msg = String.format("Unexpected exception in timer task; timer=%s", name);
+      LOG.error(msg, exception);
+      Utils.exitProcess(1, msg);
+    });
+  }
+
   @Override
   public BaseWindowedBolt withTumblingWindow(BaseWindowedBolt.Duration duration) {
 
-    // need to capture the window duration for setting the flush count down
+    // need to capture the window duration to validate it along with other profiler settings
     this.windowDurationMillis = duration.value;
     return super.withTumblingWindow(duration);
   }
@@ -464,7 +525,7 @@ public class ProfileBuilderBolt extends BaseWindowedBolt implements Reloadable {
   }
 
   public ProfileBuilderBolt withFlushSignal(FlushSignal flushSignal) {
-    this.flushSignal = flushSignal;
+    this.activeFlushSignal = flushSignal;
     return this;
   }
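
Because expired profiles are now flushed from a timer thread rather than the
bolt's executor thread, every touch of the shared MessageDistributor above is
synchronized.  A generic sketch of the pattern, using a plain
ScheduledExecutorService as a stand-in for Storm's StormTimer:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class PeriodicFlushExample {

      // Stand-in for the shared MessageDistributor; the monitor protects it
      // from concurrent access by the bolt thread and the timer thread.
      private final Object distributor = new Object();

      private final ScheduledExecutorService timer =
              Executors.newSingleThreadScheduledExecutor();

      // Analogous to startExpiredFlushTimer(): flush every ttlMillis.
      public void start(long ttlMillis) {
        timer.scheduleAtFixedRate(this::flushExpired, 0, ttlMillis, TimeUnit.MILLISECONDS);
      }

      // Runs on the timer thread, so the shared state must be locked.
      private void flushExpired() {
        synchronized (distributor) {
          // flush expired profiles and emit their measurements here
        }
      }

      // Analogous to cleanup(): stop the background thread.
      public void stop() {
        timer.shutdownNow();
      }
    }

One behavior the sketch omits: the commit's createTimer() installs an error
handler that kills the worker process, on the theory that a silently dead
flush timer losing state is worse than a restart.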
 

http://git-wip-us.apache.org/repos/asf/metron/blob/46bc63db/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileSplitterBolt.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileSplitterBolt.java b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileSplitterBolt.java
index 4e62eee..a92a432 100644
--- a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileSplitterBolt.java
+++ b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileSplitterBolt.java
@@ -21,7 +21,6 @@
 package org.apache.metron.profiler.bolt;
 
 import org.apache.metron.common.bolt.ConfiguredProfilerBolt;
-import org.apache.metron.common.configuration.profiler.ProfileConfig;
 import org.apache.metron.common.configuration.profiler.ProfilerConfig;
 import org.apache.metron.profiler.DefaultMessageRouter;
 import org.apache.metron.profiler.MessageRoute;

http://git-wip-us.apache.org/repos/asf/metron/blob/46bc63db/metron-analytics/metron-profiler/src/test/config/zookeeper/percentiles/profiler.json
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/test/config/zookeeper/percentiles/profiler.json b/metron-analytics/metron-profiler/src/test/config/zookeeper/percentiles/profiler.json
deleted file mode 100644
index 8a54834..0000000
--- a/metron-analytics/metron-profiler/src/test/config/zookeeper/percentiles/profiler.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-  "profiles": [
-    {
-      "profile": "percentiles",
-      "foreach": "ip_src_addr",
-      "onlyif": "protocol == 'HTTP'",
-      "init":   { "s": "STATS_INIT(100)" },
-      "update": { "s": "STATS_ADD(s, length)" },
-      "result": "STATS_PERCENTILE(s, 0.7)"
-    }
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/metron/blob/46bc63db/metron-analytics/metron-profiler/src/test/config/zookeeper/processing-time-test/profiler.json
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/test/config/zookeeper/processing-time-test/profiler.json b/metron-analytics/metron-profiler/src/test/config/zookeeper/processing-time-test/profiler.json
new file mode 100644
index 0000000..e75ec0f
--- /dev/null
+++ b/metron-analytics/metron-profiler/src/test/config/zookeeper/processing-time-test/profiler.json
@@ -0,0 +1,11 @@
+{
+  "profiles": [
+    {
+      "profile": "processing-time-test",
+      "foreach": "ip_src_addr",
+      "init":   { "counter": "0" },
+      "update": { "counter": "counter + 1" },
+      "result": "counter"
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/metron/blob/46bc63db/metron-analytics/metron-profiler/src/test/config/zookeeper/readme-example-1/profiler.json
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/test/config/zookeeper/readme-example-1/profiler.json b/metron-analytics/metron-profiler/src/test/config/zookeeper/readme-example-1/profiler.json
deleted file mode 100644
index 96c60a1..0000000
--- a/metron-analytics/metron-profiler/src/test/config/zookeeper/readme-example-1/profiler.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-  "profiles": [
-    {
-      "profile": "example1",
-      "foreach": "ip_src_addr",
-      "onlyif": "protocol == 'HTTP'",
-      "init": {
-        "total_bytes": 0.0
-      },
-      "update": {
-        "total_bytes": "total_bytes + bytes_in"
-      },
-      "result": "total_bytes",
-      "expires": 30
-    }
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/metron/blob/46bc63db/metron-analytics/metron-profiler/src/test/config/zookeeper/readme-example-2/profiler.json
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/test/config/zookeeper/readme-example-2/profiler.json b/metron-analytics/metron-profiler/src/test/config/zookeeper/readme-example-2/profiler.json
deleted file mode 100644
index e5d8f39..0000000
--- a/metron-analytics/metron-profiler/src/test/config/zookeeper/readme-example-2/profiler.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
-  "profiles": [
-    {
-      "profile": "example2",
-      "foreach": "ip_src_addr",
-      "onlyif": "protocol == 'DNS' or protocol == 'HTTP'",
-      "init": {
-        "num_dns": 1.0,
-        "num_http": 1.0
-      },
-      "update": {
-        "num_dns": "num_dns + (if protocol == 'DNS' then 1 else 0)",
-        "num_http": "num_http + (if protocol == 'HTTP' then 1 else 0)"
-      },
-      "result": "num_dns / num_http"
-    }
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/metron/blob/46bc63db/metron-analytics/metron-profiler/src/test/config/zookeeper/readme-example-3/profiler.json
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/test/config/zookeeper/readme-example-3/profiler.json b/metron-analytics/metron-profiler/src/test/config/zookeeper/readme-example-3/profiler.json
deleted file mode 100644
index 67cdefd..0000000
--- a/metron-analytics/metron-profiler/src/test/config/zookeeper/readme-example-3/profiler.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-  "profiles": [
-    {
-      "profile": "example3",
-      "foreach": "ip_src_addr",
-      "onlyif": "protocol == 'HTTP'",
-      "update": { "s": "STATS_ADD(s, length)" },
-      "result": "STATS_MEAN(s)"
-    }
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/metron/blob/46bc63db/metron-analytics/metron-profiler/src/test/config/zookeeper/readme-example-4/profiler.json
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/test/config/zookeeper/readme-example-4/profiler.json b/metron-analytics/metron-profiler/src/test/config/zookeeper/readme-example-4/profiler.json
deleted file mode 100644
index b003ce0..0000000
--- a/metron-analytics/metron-profiler/src/test/config/zookeeper/readme-example-4/profiler.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-  "profiles": [
-    {
-      "profile": "example4",
-      "foreach": "ip_src_addr",
-      "onlyif": "protocol == 'HTTP'",
-      "update": { "s": "STATS_ADD(s, length)" },
-      "result": "s"
-    }
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/metron/blob/46bc63db/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/ProfileBuilderBoltTest.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/ProfileBuilderBoltTest.java b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/ProfileBuilderBoltTest.java
index 78e20e0..3d009fb 100644
--- a/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/ProfileBuilderBoltTest.java
+++ b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/ProfileBuilderBoltTest.java
@@ -48,7 +48,6 @@ import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.mock;
@@ -67,6 +66,7 @@ public class ProfileBuilderBoltTest extends BaseBoltTest {
   private ProfileConfig profile2;
   private ProfileMeasurementEmitter emitter;
   private ManualFlushSignal flushSignal;
+  private ProfileMeasurement measurement;
 
   @Before
   public void setup() throws Exception {
@@ -95,6 +95,12 @@ public class ProfileBuilderBoltTest extends BaseBoltTest {
             .withUpdate(Collections.singletonMap("x", "x + 1"))
             .withResult("x");
 
+    measurement = new ProfileMeasurement()
+            .withEntity("entity1")
+            .withProfileName("profile1")
+            .withPeriod(1000, 500, TimeUnit.MILLISECONDS)
+            .withProfileValue(22);
+
     flushSignal = new ManualFlushSignal();
     flushSignal.setFlushNow(false);
   }
@@ -127,23 +133,16 @@ public class ProfileBuilderBoltTest extends BaseBoltTest {
 
   /**
    * If the {@code FlushSignal} tells the bolt to flush, it should flush the {@code MessageDistributor}
-   * and emit the {@code ProfileMeasurement} values.
+   * and emit the {@code ProfileMeasurement} values from all active profiles.
    */
   @Test
-  public void testEmitWhenFlush() throws Exception {
+  public void testFlushActiveProfiles() throws Exception {
 
     ProfileBuilderBolt bolt = createBolt();
 
-    // create a profile measurement
-    ProfileMeasurement m = new ProfileMeasurement()
-            .withEntity("entity1")
-            .withProfileName("profile1")
-            .withPeriod(1000, 500, TimeUnit.MILLISECONDS)
-            .withProfileValue(22);
-
     // create a mock that returns the profile measurement above
     MessageDistributor distributor = mock(MessageDistributor.class);
-    when(distributor.flush()).thenReturn(Collections.singletonList(m));
+    when(distributor.flush()).thenReturn(Collections.singletonList(measurement));
     bolt.withMessageDistributor(distributor);
 
     // signal the bolt to flush
@@ -157,30 +156,23 @@ public class ProfileBuilderBoltTest extends BaseBoltTest {
     // a profile measurement should be emitted by the bolt
     List<ProfileMeasurement> measurements = getProfileMeasurements(outputCollector, 1);
     assertEquals(1, measurements.size());
-    assertEquals(m, measurements.get(0));
+    assertEquals(measurement, measurements.get(0));
   }
 
   /**
    * If the {@code FlushSignal} tells the bolt NOT to flush, nothing should be emitted.
    */
   @Test
-  public void testDoNotEmitWhenNoFlush() throws Exception {
+  public void testDoNotFlushActiveProfiles() throws Exception {
 
     ProfileBuilderBolt bolt = createBolt();
 
-    // create a profile measurement
-    ProfileMeasurement m = new ProfileMeasurement()
-            .withEntity("entity1")
-            .withProfileName("profile1")
-            .withPeriod(1000, 500, TimeUnit.MILLISECONDS)
-            .withProfileValue(22);
-
-    // create a mock that returns the profile measurement above
+    // create a mock where flush() returns the profile measurement above
     MessageDistributor distributor = mock(MessageDistributor.class);
-    when(distributor.flush()).thenReturn(Collections.singletonList(m));
+    when(distributor.flush()).thenReturn(Collections.singletonList(measurement));
     bolt.withMessageDistributor(distributor);
 
-    // no flush signal
+    // there is no flush signal
     flushSignal.setFlushNow(false);
 
     // execute the bolt
@@ -193,6 +185,29 @@ public class ProfileBuilderBoltTest extends BaseBoltTest {
   }
 
   /**
+   * Expired profiles should be flushed regularly, even if no input telemetry
+   * has been received.
+   */
+  @Test
+  public void testFlushExpiredProfiles() throws Exception {
+
+    ProfileBuilderBolt bolt = createBolt();
+
+    // create a mock where flushExpired() returns the profile measurement above
+    MessageDistributor distributor = mock(MessageDistributor.class);
+    when(distributor.flushExpired()).thenReturn(Collections.singletonList(measurement));
+    bolt.withMessageDistributor(distributor);
+
+    // execute the test by flushing expired profiles; this is normally triggered by a timer task.
+    bolt.flushExpired();
+
+    // a profile measurement should be emitted by the bolt
+    List<ProfileMeasurement> measurements = getProfileMeasurements(outputCollector, 1);
+    assertEquals(1, measurements.size());
+    assertEquals(measurement, measurements.get(0));
+  }
+
+  /**
    * A {@link ProfileMeasurement} is built for each profile/entity pair.  The measurement should be emitted to each
    * destination defined by the profile. By default, a profile uses both Kafka and HBase as destinations.
    */
@@ -232,61 +247,6 @@ public class ProfileBuilderBoltTest extends BaseBoltTest {
     verify(outputCollector, times(1)).emit(eq("destination3"), any());
   }
 
-  @Test
-  public void testFlushExpiredWithTick() throws Exception {
-
-    ProfileBuilderBolt bolt = createBolt();
-
-    // create a mock
-    MessageDistributor distributor = mock(MessageDistributor.class);
-    bolt.withMessageDistributor(distributor);
-
-    // tell the bolt to flush on the first window
-    flushSignal.setFlushNow(true);
-
-    // execute the bolt; include a tick tuple in the window
-    Tuple tuple1 = createTuple("entity", message1, profile1, 100000000L);
-    TupleWindow tupleWindow = createWindow(tuple1, mockTickTuple());
-    bolt.execute(tupleWindow);
-
-    // ensure the expired profiles were flushed when the tick tuple was received
-    verify(distributor).flushExpired();
-  }
-
-  @Test
-  public void testFlushExpiredWithNoTick() throws Exception {
-
-    ProfileBuilderBolt bolt = createBolt();
-
-    // create a mock
-    MessageDistributor distributor = mock(MessageDistributor.class);
-    bolt.withMessageDistributor(distributor);
-
-    // tell the bolt to flush on the first window
-    flushSignal.setFlushNow(true);
-
-    // execute the bolt; NO tick tuple
-    Tuple tuple1 = createTuple("entity", message1, profile1, 100000000L);
-    TupleWindow tupleWindow = createWindow(tuple1);
-    bolt.execute(tupleWindow);
-
-    // there was no tick tuple; the expired profiles should NOT have been flushed
-    verify(distributor, times(0)).flushExpired();
-  }
-
-  /**
-   * Creates a mock tick tuple to use for testing.
-   * @return A mock tick tuple.
-   */
-  private Tuple mockTickTuple() {
-
-    Tuple tuple = mock(Tuple.class);
-    when(tuple.getSourceComponent()).thenReturn("__system");
-    when(tuple.getSourceStreamId()).thenReturn("__tick");
-
-    return tuple;
-  }
-
   /**
    * Retrieves the ProfileMeasurement(s) (if any) that have been emitted.
    *
@@ -334,18 +294,6 @@ public class ProfileBuilderBoltTest extends BaseBoltTest {
    */
   private ProfileBuilderBolt createBolt() throws IOException {
 
-    return createBolt(30, TimeUnit.SECONDS);
-  }
-
-  /**
-   * Create a ProfileBuilderBolt to test.
-   *
-   * @param windowDuration The event window duration.
-   * @param windowDurationUnits The units of the event window duration.
-   * @return A {@link ProfileBuilderBolt} to test.
-   */
-  private ProfileBuilderBolt createBolt(int windowDuration, TimeUnit windowDurationUnits) throws IOException {
-
     // defines the zk configurations accessible from the bolt
     ProfilerConfigurations configurations = new ProfilerConfigurations();
     configurations.updateGlobalConfig(Collections.emptyMap());
@@ -359,7 +307,7 @@ public class ProfileBuilderBoltTest extends BaseBoltTest {
             .withEmitter(emitter)
             .withProfilerConfigurations(configurations)
             .withPeriodDuration(1, TimeUnit.MINUTES)
-            .withTumblingWindow(new BaseWindowedBolt.Duration(windowDuration, windowDurationUnits));
+            .withTumblingWindow(new BaseWindowedBolt.Duration(30, TimeUnit.SECONDS));
     bolt.prepare(new HashMap<>(), topologyContext, outputCollector);
 
     // set the flush signal AFTER calling 'prepare'

http://git-wip-us.apache.org/repos/asf/metron/blob/46bc63db/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/integration/ProfilerIntegrationTest.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/integration/ProfilerIntegrationTest.java b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/integration/ProfilerIntegrationTest.java
index c48a3e9..8f5ced3 100644
--- a/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/integration/ProfilerIntegrationTest.java
+++ b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/integration/ProfilerIntegrationTest.java
@@ -20,9 +20,6 @@
 
 package org.apache.metron.profiler.integration;
 
-import com.google.common.base.Joiner;
-import org.adrianwalker.multilinestring.Multiline;
-import org.apache.commons.math.util.MathUtils;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -41,10 +38,8 @@ import org.apache.metron.profiler.hbase.ColumnBuilder;
 import org.apache.metron.profiler.hbase.RowKeyBuilder;
 import org.apache.metron.profiler.hbase.SaltyRowKeyBuilder;
 import org.apache.metron.profiler.hbase.ValueOnlyColumnBuilder;
-import org.apache.metron.statistics.OnlineStatisticsProvider;
 import org.junit.After;
 import org.junit.AfterClass;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -61,6 +56,7 @@ import static com.google.code.tempusfugit.temporal.Timeout.timeout;
 import static com.google.code.tempusfugit.temporal.WaitFor.waitOrTimeout;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 /**
  * An integration test of the Profiler topology.
@@ -70,247 +66,103 @@ public class ProfilerIntegrationTest extends BaseIntegrationTest {
   private static final String TEST_RESOURCES = "../../metron-analytics/metron-profiler/src/test";
   private static final String FLUX_PATH = "../metron-profiler/src/main/flux/profiler/remote.yaml";
 
-  /**
-   * {
-   * "ip_src_addr": "10.0.0.1",
-   * "protocol": "HTTPS",
-   * "length": 10,
-   * "bytes_in": 234
-   * }
-   */
-  @Multiline
-  private static String message1;
-
-  /**
-   * {
-   * "ip_src_addr": "10.0.0.2",
-   * "protocol": "HTTP",
-   * "length": 20,
-   * "bytes_in": 390
-   * }
-   */
-  @Multiline
-  private static String message2;
-
-  /**
-   * {
-   * "ip_src_addr": "10.0.0.3",
-   * "protocol": "DNS",
-   * "length": 30,
-   * "bytes_in": 560
-   * }
-   */
-  @Multiline
-  private static String message3;
-
-  private static ColumnBuilder columnBuilder;
-  private static ZKServerComponent zkComponent;
-  private static FluxTopologyComponent fluxComponent;
-  private static KafkaComponent kafkaComponent;
-  private static ConfigUploadComponent configUploadComponent;
-  private static ComponentRunner runner;
-  private static MockHTable profilerTable;
+  public static final long startAt = 10;
+  public static final String entity = "10.0.0.1";
 
   private static final String tableName = "profiler";
   private static final String columnFamily = "P";
-  private static final double epsilon = 0.001;
   private static final String inputTopic = Constants.INDEXING_TOPIC;
   private static final String outputTopic = "profiles";
   private static final int saltDivisor = 10;
 
-  private static final long windowLagMillis = TimeUnit.SECONDS.toMillis(5);
+  private static final long windowLagMillis = TimeUnit.SECONDS.toMillis(1);
   private static final long windowDurationMillis = TimeUnit.SECONDS.toMillis(5);
-  private static final long periodDurationMillis = TimeUnit.SECONDS.toMillis(15);
-  private static final long profileTimeToLiveMillis = TimeUnit.SECONDS.toMillis(20);
+  private static final long periodDurationMillis = TimeUnit.SECONDS.toMillis(10);
+  private static final long profileTimeToLiveMillis = TimeUnit.SECONDS.toMillis(15);
   private static final long maxRoutesPerBolt = 100000;
 
-  /**
-   * Tests the first example contained within the README.
-   */
-  @Test
-  public void testExample1() throws Exception {
-
-    uploadConfig(TEST_RESOURCES + "/config/zookeeper/readme-example-1");
-
-    // start the topology and write test messages to kafka
-    fluxComponent.submitTopology();
-    kafkaComponent.writeMessages(inputTopic, message1, message1, message1);
-    kafkaComponent.writeMessages(inputTopic, message2, message2, message2);
-    kafkaComponent.writeMessages(inputTopic, message3, message3, message3);
-
-    // verify - ensure the profile is being persisted
-    waitOrTimeout(() -> profilerTable.getPutLog().size() > 0,
-            timeout(seconds(180)));
-
-    // verify - only 10.0.0.2 sends 'HTTP', thus there should be only 1 value
-    List<Double> actuals = read(profilerTable.getPutLog(), columnFamily,
-            columnBuilder.getColumnQualifier("value"), Double.class);
-
-    // verify - there are 3 'HTTP' each with 390 bytes
-    Assert.assertTrue(actuals.stream().anyMatch(val ->
-            MathUtils.equals(390.0 * 3, val, epsilon)
-    ));
-  }
-
-  /**
-   * Tests the second example contained within the README.
-   */
-  @Test
-  public void testExample2() throws Exception {
-
-    uploadConfig(TEST_RESOURCES + "/config/zookeeper/readme-example-2");
-
-    // start the topology and write test messages to kafka
-    fluxComponent.submitTopology();
-    kafkaComponent.writeMessages(inputTopic, message1, message1, message1);
-    kafkaComponent.writeMessages(inputTopic, message2, message2, message2);
-    kafkaComponent.writeMessages(inputTopic, message3, message3, message3);
-
-    // expect 2 values written by the profile; one for 10.0.0.2 and another for 10.0.0.3
-    final int expected = 2;
-
-    // verify - ensure the profile is being persisted
-    waitOrTimeout(() -> profilerTable.getPutLog().size() >= expected,
-            timeout(seconds(90)));
-
-    // verify - expect 2 results as 2 hosts involved; 10.0.0.2 sends 'HTTP' and 10.0.0.3 send 'DNS'
-    List<Double> actuals = read(profilerTable.getPutLog(), columnFamily,
-            columnBuilder.getColumnQualifier("value"), Double.class);
-
-    // verify - 10.0.0.3 -> 1/4
-    Assert.assertTrue( "Could not find a value near 1/4. Actual values read are are: " + Joiner.on(",").join(actuals),
-            actuals.stream().anyMatch(val -> MathUtils.equals(val, 1.0/4.0, epsilon)
-    ));
-
-    // verify - 10.0.0.2 -> 4/1
-    Assert.assertTrue("Could not find a value near 4. Actual values read are are: " + Joiner.on(",").join(actuals),
-            actuals.stream().anyMatch(val -> MathUtils.equals(val, 4.0/1.0, epsilon)
-    ));
-  }
-
-  /**
-   * Tests the third example contained within the README.
-   */
-  @Test
-  public void testExample3() throws Exception {
-
-    uploadConfig(TEST_RESOURCES + "/config/zookeeper/readme-example-3");
+  private static ColumnBuilder columnBuilder;
+  private static ZKServerComponent zkComponent;
+  private static FluxTopologyComponent fluxComponent;
+  private static KafkaComponent kafkaComponent;
+  private static ConfigUploadComponent configUploadComponent;
+  private static ComponentRunner runner;
+  private static MockHTable profilerTable;
 
-    // start the topology and write test messages to kafka
-    fluxComponent.submitTopology();
-    kafkaComponent.writeMessages(inputTopic, message1, message1, message1);
-    kafkaComponent.writeMessages(inputTopic, message2, message2, message2);
-    kafkaComponent.writeMessages(inputTopic, message3, message3, message3);
-
-    // verify - ensure the profile is being persisted
-    waitOrTimeout(() -> profilerTable.getPutLog().size() > 0,
-            timeout(seconds(90)));
-
-    // verify - only 10.0.0.2 sends 'HTTP', thus there should be only 1 value
-    List<Double> actuals = read(profilerTable.getPutLog(), columnFamily,
-            columnBuilder.getColumnQualifier("value"), Double.class);
-
-    // verify - there are 5 'HTTP' messages each with a length of 20, thus the average should be 20
-    Assert.assertTrue("Could not find a value near 20. Actual values read are are: " + Joiner.on(",").join(actuals),
-            actuals.stream().anyMatch(val -> MathUtils.equals(val, 20.0, epsilon)
-    ));
-  }
+  private static String message1;
+  private static String message2;
+  private static String message3;
 
   /**
-   * Tests the fourth example contained within the README.
+   * The Profiler can generate profiles using processing time.  With processing time,
+   * the Profiler builds profiles based on when the telemetry was processed.
+   *
+   * <p>Not defining a 'timestampField' within the Profiler configuration tells the Profiler
+   * to use processing time.
    */
   @Test
-  public void testExample4() throws Exception {
+  public void testProcessingTime() throws Exception {
 
-    uploadConfig(TEST_RESOURCES + "/config/zookeeper/readme-example-4");
+    // upload the config to zookeeper
+    uploadConfig(TEST_RESOURCES + "/config/zookeeper/processing-time-test");
 
     // start the topology and write test messages to kafka
     fluxComponent.submitTopology();
-    kafkaComponent.writeMessages(inputTopic, message1, message1, message1);
-    kafkaComponent.writeMessages(inputTopic, message2, message2, message2);
-    kafkaComponent.writeMessages(inputTopic, message3, message3, message3);
-
-    // verify - ensure the profile is being persisted
-    waitOrTimeout(() -> profilerTable.getPutLog().size() > 0,
-            timeout(seconds(90)));
-
-    // verify - only 10.0.0.2 sends 'HTTP', thus there should be only 1 value
-    byte[] column = columnBuilder.getColumnQualifier("value");
-    List<OnlineStatisticsProvider> actuals = read(profilerTable.getPutLog(), columnFamily, column, OnlineStatisticsProvider.class);
-
-    // verify - there are 5 'HTTP' messages each with a length of 20, thus the average should be 20
-    Assert.assertTrue("Could not find a value near 20. Actual values read are are: " + Joiner.on(",").join(actuals),
-            actuals.stream().anyMatch(val -> MathUtils.equals(val.getMean(), 20.0, epsilon)
-    ));
-  }
 
-  @Test
-  public void testPercentiles() throws Exception {
-
-    uploadConfig(TEST_RESOURCES + "/config/zookeeper/percentiles");
+    // the messages that will be applied to the profile
+    kafkaComponent.writeMessages(inputTopic, message1);
+    kafkaComponent.writeMessages(inputTopic, message2);
+    kafkaComponent.writeMessages(inputTopic, message3);
 
-    // start the topology and write test messages to kafka
-    fluxComponent.submitTopology();
-    kafkaComponent.writeMessages(inputTopic, message1, message1, message1);
-    kafkaComponent.writeMessages(inputTopic, message2, message2, message2);
-    kafkaComponent.writeMessages(inputTopic, message3, message3, message3);
+    // Storm needs at least one message to close its event window
+    int attempt = 0;
+    while(profilerTable.getPutLog().size() == 0 && attempt++ < 10) {
 
-    // verify - ensure the profile is being persisted
-    waitOrTimeout(() -> profilerTable.getPutLog().size() > 0,
-            timeout(seconds(90)));
+      // sleep, at least beyond the current window
+      Thread.sleep(windowDurationMillis + windowLagMillis);
 
-    List<Double> actuals = read(profilerTable.getPutLog(), columnFamily,
-            columnBuilder.getColumnQualifier("value"), Double.class);
+      // send another message to help close the current event window
+      kafkaComponent.writeMessages(inputTopic, message2);
+    }
 
-    // verify - the 70th percentile of x3, 20s = 20.0
-    Assert.assertTrue("Could not find a value near 20. Actual values read are are: " + Joiner.on(",").join(actuals),
-            actuals.stream().anyMatch(val -> MathUtils.equals(val, 20.0, epsilon)));
+    // validate what was flushed
+    List<Integer> actuals = read(
+            profilerTable.getPutLog(),
+            columnFamily,
+            columnBuilder.getColumnQualifier("value"),
+            Integer.class);
+    assertEquals(1, actuals.size());
+    assertTrue(actuals.get(0) >= 3);
   }
 
   /**
-   * The Profiler can optionally perform event time processing.  With event time processing,
+   * The Profiler can generate profiles using event time.  With event time processing,
    * the Profiler uses timestamps contained in the source telemetry.
    *
    * <p>Defining a 'timestampField' within the Profiler configuration tells the Profiler
    * from which field the timestamp should be extracted.
    */
   @Test
-  public void testEventTimeProcessing() throws Exception {
-
-    // constants used for the test
-    final long startAt = 10;
-    final String entity = "10.0.0.1";
-    final String profileName = "event-time-test";
-
-    // create some messages that contain a timestamp - a really old timestamp; close to 1970
-    String message1 = new MessageBuilder()
-            .withField("ip_src_addr", entity)
-            .withField("timestamp", startAt)
-            .build()
-            .toJSONString();
-
-    String message2 = new MessageBuilder()
-            .withField("ip_src_addr", entity)
-            .withField("timestamp", startAt + 100)
-            .build()
-            .toJSONString();
+  public void testEventTime() throws Exception {
 
+    // upload the profiler config to zookeeper
     uploadConfig(TEST_RESOURCES + "/config/zookeeper/event-time-test");
 
     // start the topology and write test messages to kafka
     fluxComponent.submitTopology();
-    kafkaComponent.writeMessages(inputTopic, message1, message2);
+    kafkaComponent.writeMessages(inputTopic, message1);
+    kafkaComponent.writeMessages(inputTopic, message2);
+    kafkaComponent.writeMessages(inputTopic, message3);
 
-    // verify - ensure the profile is being persisted
-    waitOrTimeout(() -> profilerTable.getPutLog().size() > 0,
-            timeout(seconds(90)));
+    // wait until the profile is flushed
+    waitOrTimeout(() -> profilerTable.getPutLog().size() > 0, timeout(seconds(90)));
 
     List<Put> puts = profilerTable.getPutLog();
     assertEquals(1, puts.size());
 
     // inspect the row key to ensure the profiler used event time correctly.  the timestamp
     // embedded in the row key should match those in the source telemetry
-    byte[] expectedRowKey = generateExpectedRowKey(profileName, entity, startAt);
+    byte[] expectedRowKey = generateExpectedRowKey("event-time-test", entity, startAt);
     byte[] actualRowKey = puts.get(0).getRow();
     String msg = String.format("expected '%s', got '%s'",
             new String(expectedRowKey, "UTF-8"),
@@ -364,6 +216,26 @@ public class ProfilerIntegrationTest extends BaseIntegrationTest {
 
   @BeforeClass
   public static void setupBeforeClass() throws UnableToStartException {
+
+    // create some messages that contain a timestamp - a really old timestamp; close to 1970
+    message1 = new MessageBuilder()
+            .withField("ip_src_addr", entity)
+            .withField("timestamp", startAt)
+            .build()
+            .toJSONString();
+
+    message2 = new MessageBuilder()
+            .withField("ip_src_addr", entity)
+            .withField("timestamp", startAt + 100)
+            .build()
+            .toJSONString();
+
+    message3 = new MessageBuilder()
+            .withField("ip_src_addr", entity)
+            .withField("timestamp", startAt + (windowDurationMillis * 2))
+            .build()
+            .toJSONString();
+
     columnBuilder = new ValueOnlyColumnBuilder(columnFamily);
 
     // storm topology properties
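
The two tests above hinge on a single switch: whether a 'timestampField' is
configured.  A hedged illustration of the idea (not Metron's actual extraction
code; the helper below is invented for clarity):

    import java.util.Optional;
    import org.json.simple.JSONObject;

    public class TimestampExample {

      // Event time: read the configured field from the telemetry itself.
      // Processing time: no field configured, so fall back to the wall clock.
      public static long timestampOf(JSONObject message, Optional<String> timestampField) {
        return timestampField
                .map(field -> ((Number) message.get(field)).longValue())
                .orElseGet(System::currentTimeMillis);
      }
    }

This is also why testEventTime can assert an exact row key: the 'really old'
timestamps near 1970 come from the messages themselves, not from when the test
happens to run.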

http://git-wip-us.apache.org/repos/asf/metron/blob/46bc63db/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileConfig.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileConfig.java b/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileConfig.java
index 6205fbf..f5b46e6 100644
--- a/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileConfig.java
+++ b/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileConfig.java
@@ -18,6 +18,8 @@
 package org.apache.metron.common.configuration.profiler;
 
 import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
 
 import java.io.Serializable;
 import java.util.ArrayList;
@@ -225,32 +227,39 @@ public class ProfileConfig implements Serializable {
 
   @Override
   public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
+    if (this == o) {
+      return true;
+    }
 
-    ProfileConfig that = (ProfileConfig) o;
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
 
-    if (profile != null ? !profile.equals(that.profile) : that.profile != null) return false;
-    if (foreach != null ? !foreach.equals(that.foreach) : that.foreach != null) return false;
-    if (onlyif != null ? !onlyif.equals(that.onlyif) : that.onlyif != null) return false;
-    if (init != null ? !init.equals(that.init) : that.init != null) return false;
-    if (update != null ? !update.equals(that.update) : that.update != null) return false;
-    if (groupBy != null ? !groupBy.equals(that.groupBy) : that.groupBy != null) return false;
-    if (result != null ? !result.equals(that.result) : that.result != null) return false;
-    return expires != null ? expires.equals(that.expires) : that.expires == null;
+    ProfileConfig that = (ProfileConfig) o;
+    return new EqualsBuilder()
+            .append(profile, that.profile)
+            .append(foreach, that.foreach)
+            .append(onlyif, that.onlyif)
+            .append(init, that.init)
+            .append(update, that.update)
+            .append(groupBy, that.groupBy)
+            .append(result, that.result)
+            .append(expires, that.expires)
+            .isEquals();
   }
 
   @Override
   public int hashCode() {
-    int result1 = profile != null ? profile.hashCode() : 0;
-    result1 = 31 * result1 + (foreach != null ? foreach.hashCode() : 0);
-    result1 = 31 * result1 + (onlyif != null ? onlyif.hashCode() : 0);
-    result1 = 31 * result1 + (init != null ? init.hashCode() : 0);
-    result1 = 31 * result1 + (update != null ? update.hashCode() : 0);
-    result1 = 31 * result1 + (groupBy != null ? groupBy.hashCode() : 0);
-    result1 = 31 * result1 + (result != null ? result.hashCode() : 0);
-    result1 = 31 * result1 + (expires != null ? expires.hashCode() : 0);
-    return result1;
+    return new HashCodeBuilder(17, 37)
+            .append(profile)
+            .append(foreach)
+            .append(onlyif)
+            .append(init)
+            .append(update)
+            .append(groupBy)
+            .append(result)
+            .append(expires)
+            .toHashCode();
   }
 
   @Override
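
The hand-rolled null checks above are replaced with the commons-lang builders.
A minimal example of the same pattern on a hypothetical two-field class:

    import org.apache.commons.lang.builder.EqualsBuilder;
    import org.apache.commons.lang.builder.HashCodeBuilder;

    public class Point {
      private final int x;
      private final int y;

      public Point(int x, int y) {
        this.x = x;
        this.y = y;
      }

      @Override
      public boolean equals(Object o) {
        if (this == o) {
          return true;
        }
        if (o == null || getClass() != o.getClass()) {
          return false;
        }
        Point that = (Point) o;
        // append() compares field by field, handling nulls consistently
        return new EqualsBuilder()
                .append(x, that.x)
                .append(y, that.y)
                .isEquals();
      }

      @Override
      public int hashCode() {
        // 17 and 37 are arbitrary odd seeds; they only need to stay constant
        return new HashCodeBuilder(17, 37)
                .append(x)
                .append(y)
                .toHashCode();
      }
    }

Building equals() and hashCode() from the same field list is what makes the
hash-based profiler cache key earlier in this commit safe to rely on.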

http://git-wip-us.apache.org/repos/asf/metron/blob/46bc63db/metron-platform/metron-common/src/test/java/org/apache/metron/common/zookeeper/ZKConfigurationsCacheIntegrationTest.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/test/java/org/apache/metron/common/zookeeper/ZKConfigurationsCacheIntegrationTest.java b/metron-platform/metron-common/src/test/java/org/apache/metron/common/zookeeper/ZKConfigurationsCacheIntegrationTest.java
index ec4a98a..5240d7a 100644
--- a/metron-platform/metron-common/src/test/java/org/apache/metron/common/zookeeper/ZKConfigurationsCacheIntegrationTest.java
+++ b/metron-platform/metron-common/src/test/java/org/apache/metron/common/zookeeper/ZKConfigurationsCacheIntegrationTest.java
@@ -154,7 +154,7 @@ public class ZKConfigurationsCacheIntegrationTest {
     }
     {
       //profiler
-      byte[] config = IOUtils.toByteArray(new FileInputStream(new File(profilerDir, "/readme-example-1/profiler.json")));
+      byte[] config = IOUtils.toByteArray(new FileInputStream(new File(profilerDir, "/event-time-test/profiler.json")));
       ConfigurationsUtils.writeProfilerConfigToZookeeper( config, client);
     }
     {
@@ -284,7 +284,7 @@ public class ZKConfigurationsCacheIntegrationTest {
     }
     //profiler
     {
-      File inFile = new File(profilerDir, "/readme-example-1/profiler.json");
+      File inFile = new File(profilerDir, "/event-time-test/profiler.json");
       ProfilerConfig expectedConfig = JSONUtils.INSTANCE.load(inFile, ProfilerConfig.class);
       ProfilerConfigurations config = cache.get( ProfilerConfigurations.class);
       assertEventually(() -> Assert.assertEquals(expectedConfig, config.getProfilerConfig()));

http://git-wip-us.apache.org/repos/asf/metron/blob/46bc63db/metron-platform/metron-hbase/src/main/java/org/apache/metron/hbase/bolt/HBaseBolt.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-hbase/src/main/java/org/apache/metron/hbase/bolt/HBaseBolt.java b/metron-platform/metron-hbase/src/main/java/org/apache/metron/hbase/bolt/HBaseBolt.java
index d16e2f6..6953b18 100644
--- a/metron-platform/metron-hbase/src/main/java/org/apache/metron/hbase/bolt/HBaseBolt.java
+++ b/metron-platform/metron-hbase/src/main/java/org/apache/metron/hbase/bolt/HBaseBolt.java
@@ -24,7 +24,7 @@ import java.lang.invoke.MethodHandles;
 import java.lang.reflect.InvocationTargetException;
 import java.util.Map;
 import java.util.Optional;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.metron.hbase.HTableProvider;
@@ -77,6 +77,8 @@ public class HBaseBolt extends BaseRichBolt {
 
   /**
    * The name of the class that should be used as a table provider.
+   *
+   * <p>Defaults to 'org.apache.metron.hbase.HTableProvider'.
    */
   protected String tableProviderClazzName = "org.apache.metron.hbase.HTableProvider";
 
@@ -126,6 +128,8 @@ public class HBaseBolt extends BaseRichBolt {
 
   @Override
   public Map<String, Object> getComponentConfiguration() {
+    LOG.debug("Tick tuples expected every {} second(s)", flushIntervalSecs);
+
     Config conf = new Config();
     conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, flushIntervalSecs);
     return conf;
@@ -136,7 +140,13 @@ public class HBaseBolt extends BaseRichBolt {
     this.collector = collector;
     this.batchHelper = new BatchHelper(batchSize, collector);
 
-    TableProvider provider = this.tableProvider == null ?getTableProvider(tableProviderClazzName):this.tableProvider;
+    TableProvider provider;
+    if(this.tableProvider == null) {
+      provider = createTableProvider(tableProviderClazzName);
+    } else {
+      provider = this.tableProvider;
+    }
+
     hbaseClient = new HBaseClient(provider, HBaseConfiguration.create(), tableName);
   }
 
@@ -147,6 +157,8 @@ public class HBaseBolt extends BaseRichBolt {
 
   @Override
   public void execute(Tuple tuple) {
+    LOG.trace("Received a tuple.");
+
     try {
       if (batchHelper.shouldHandle(tuple)) {
         save(tuple);
@@ -179,12 +191,15 @@ public class HBaseBolt extends BaseRichBolt {
     }
 
     batchHelper.addBatch(tuple);
+    LOG.debug("Added mutation to the batch; size={}", batchHelper.getBatchSize());
   }
 
   /**
    * Flush all saved operations.
    */
   private void flush() {
+    LOG.debug("About to flush a batch of {} mutation(s)", batchHelper.getBatchSize());
+
     this.hbaseClient.mutate();
     batchHelper.ack();
   }
@@ -193,7 +208,8 @@ public class HBaseBolt extends BaseRichBolt {
    * Creates a TableProvider based on a class name.
    * @param connectorImpl The class name of a TableProvider
    */
-  private static TableProvider getTableProvider(String connectorImpl) {
+  private static TableProvider createTableProvider(String connectorImpl) {
+    LOG.trace("Creating table provider; className={}", connectorImpl);
 
     // if class name not defined, use a reasonable default
     if(StringUtils.isEmpty(connectorImpl) || connectorImpl.charAt(0) == '$') {
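
In contrast to the profiler bolt, HBaseBolt keeps Storm's tick-tuple mechanism
for its flush interval.  A compact sketch of that idiom (the detection logic is
shown explicitly here; Storm also provides TupleUtils.isTick() for the same
check):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.storm.Config;
    import org.apache.storm.Constants;
    import org.apache.storm.tuple.Tuple;

    public class TickTupleExample {

      private final int flushIntervalSecs = 1;  // illustrative value

      // Asks Storm to deliver a tick tuple to this bolt on a fixed interval.
      public Map<String, Object> getComponentConfiguration() {
        Map<String, Object> conf = new HashMap<>();
        conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, flushIntervalSecs);
        return conf;
      }

      // Tick tuples arrive from Storm's system component on the tick stream.
      public boolean isTick(Tuple tuple) {
        return Constants.SYSTEM_COMPONENT_ID.equals(tuple.getSourceComponent())
                && Constants.SYSTEM_TICK_STREAM_ID.equals(tuple.getSourceStreamId());
      }
    }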


[22/50] [abbrv] metron git commit: METRON-1462: Separate ES and Kibana from Metron Mpack (mmiklavc via mmiklavc) closes apache/metron#943

Posted by rm...@apache.org.
http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-site.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-site.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-site.xml
deleted file mode 100755
index 34df1e4..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-site.xml
+++ /dev/null
@@ -1,198 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<!-- Elastic search  Configurations -->
-
-<configuration supports_final="true">
-    <!-- Configurations -->
-    <property>
-        <name>cluster_name</name>
-        <value>metron</value>
-        <description>Elasticsearch Cluster Name identifies your Elasticsearch subsystem</description>
-    </property>
-    <property>
-        <name>masters_also_are_datanodes</name>
-        <value>"false"</value>
-        <description>ES Masters and Slaves cannot be installed on the same nodes.  Set this to "true" if you want the ES master nodes to serve as combined master/datanodes. Note: surround value in quotes.</description>
-        <value-attributes>
-            <type>string</type>
-        </value-attributes>
-    </property>
-    <property>
-        <name>zen_discovery_ping_unicast_hosts</name>
-        <!--Ideally this gets populated by the list of master eligible nodes (as an acceptable default).  Unsure how to do this.-->
-        <!--Also need to document whether should list masters only, or all ES nodes. I think this one is all nodes, but previous inline comment said Masters.-->
-        <value></value>
-        <description>Unicast discovery list of hosts to act as gossip routers, comma-separated list with square brackets: [ eshost1, eshost2 ]</description>
-    </property>
-    <property>
-        <name>index_number_of_shards</name>
-        <value>4</value>
-        <description>Set the number of shards (splits) of an index.  Changes are not effective after index creation. Usually set to 1 for single-node install.</description>
-    </property>
-    <property>
-        <name>index_number_of_replicas</name>
-        <value>2</value>
-        <description>Set the number of replicas (copies in addition to the first) of an index. Usually set to 0 for single-node install.</description>
-    </property>
-    <property>
-        <name>path_data</name>
-        <value>"/opt/lmm/es_data"</value>
-        <description>Comma-separated list of directories where to store index data allocated for each node: "/mnt/first","/mnt/second".  Number of paths should relate to number of shards, and preferably should be on separate physical volumes.</description>
-    </property>
-    <property>
-        <name>http_cors_enabled</name>
-        <value>"false"</value>
-        <description>Enable or disable cross-origin resource sharing, i.e. whether a browser on another origin can do requests to Elasticsearch. Defaults to false.</description>
-        <value-attributes>
-            <type>string</type>
-        </value-attributes>
-    </property>
-    <property>
-        <name>http_port</name>
-        <value>9200-9300</value>
-        <description>Set a custom port to listen for HTTP traffic</description>
-    </property>
-    <property>
-        <name>transport_tcp_port</name>
-        <value>9300-9400</value>
-        <description>Set a custom port for the node to node communication</description>
-    </property>
-    <!--  Multi-node Discovery -->
-    <property>
-        <name>discovery_zen_ping_timeout</name>
-        <value>3s</value>
-        <description>Wait for ping responses for master discovery</description>
-    </property>
-    <property>
-        <name>discovery_zen_fd_ping_interval</name>
-        <value>15s</value>
-        <description>How often a node is pinged by the fault-detection mechanism</description>
-    </property>
-    <property>
-        <name>discovery_zen_fd_ping_timeout</name>
-        <value>60s</value>
-        <description>How long to wait for a fault-detection ping response before retrying</description>
-    </property>
-    <property>
-        <name>discovery_zen_fd_ping_retries</name>
-        <value>5</value>
-        <description>Number of ping retries before blacklisting</description>
-    </property>
-    <!--  Gateway -->
-    <property>
-        <name>gateway_recover_after_data_nodes</name>
-        <value>3</value>
-        <description>Begin recovery once at least this many data nodes have joined the cluster.</description>
-    </property>
-    <property>
-        <name>recover_after_time</name>
-        <value>15m</value>
-        <description>Time to wait before starting recovery when fewer than the expected number of data nodes have joined</description>
-    </property>
-    <property>
-        <name>expected_data_nodes</name>
-        <value>0</value>
-        <description>Number of data nodes expected in the cluster; recovery starts immediately once they have all joined</description>
-    </property>
-    <!--  Index -->  
-    <property>
-        <name>index_merge_scheduler_max_thread_count</name>
-        <value>5</value>
-        <description>Maximum number of threads used by the merge scheduler per index (index.merge.scheduler.max_thread_count)</description>
-    </property>
-    <property>
-        <name>indices_memory_index_store_throttle_type</name>
-        <value>none</value>
-        <description>Store-level throttling applied to segment merges (indices.store.throttle.type); "none" disables throttling</description>
-    </property>
-    <property>
-        <name>index_refresh_interval</name>
-        <value>1s</value>
-        <description>How often an index is refreshed, making newly indexed documents visible to search</description>
-    </property>
-    <property>
-        <name>index_translog_flush_threshold_size</name>
-        <value>5g</value>
-        <description>Translog size at which a flush is triggered (index.translog.flush_threshold_size)</description>
-    </property>
-    <property>
-        <name>indices_memory_index_buffer_size</name>
-        <value>10%</value>
-        <description>Percentage of heap used for write buffers</description>
-    </property>
-    <property>
-        <name>bootstrap_memory_lock</name>
-        <value>true</value>
-        <description>On Linux/Unix systems, use mlockall to try to lock the process address space into RAM, preventing any Elasticsearch memory from being swapped out</description>
-    </property>
-    <property>
-        <name>threadpool_bulk_queue_size</name>
-        <value>3000</value>
-        <description>Number of bulk requests that can be queued on a node when no thread is available to execute them</description>
-    </property>
-    <property>
-        <name>threadpool_index_queue_size</name>
-        <value>1000</value>
-        <description>Number of index requests that can be queued on a node when no thread is available to execute them</description>
-    </property>
-    <property>
-        <name>indices_cluster_send_refresh_mapping</name>
-        <value>false</value>
-        <description>Set on data nodes to make index requests more efficient</description>
-    </property>
-    <property>
-        <name>indices_fielddata_cache_size</name>
-        <value>25%</value>
-        <description>Keep in mind that setting this value improperly can cause facet searches and sorting to perform very poorly, or the ES node to run out of memory when a facet query is run against a large index</description>
-    </property>
-    <property>
-        <name>cluster_routing_allocation_disk_watermark_high</name>
-        <value>0.99</value>
-        <description>High watermark for disk-based shard allocation; shards are relocated away from a node whose disk usage exceeds this value</description>
-    </property>
-    <property>
-        <name>cluster_routing_allocation_disk_threshold_enabled</name>
-        <value>true</value>
-        <description>Enables or disables disk-based shard allocation thresholds</description>
-    </property>   
-   <property>
-        <name>cluster_routing_allocation_disk_watermark_low</name>
-        <value>0.97</value>
-        <description>Low watermark for disk-based shard allocation; no new shards are allocated to a node whose disk usage exceeds this value</description>
-    </property>
-    <property>
-        <name>cluster_routing_allocation_node_concurrent_recoveries</name>
-        <value>4</value>
-        <description>Max concurrent recoveries, useful for fast recovery of the cluster nodes on restart</description>
-    </property>
-    <property>
-        <name>network_host</name>
-        <value>[ _local_, _site_ ]</value>
-        <description>Network interface(s) ES will bind to within each node. "_site_" or a more specific external address is required for all multi-node clusters, and also recommended for single-node installs to allow access to ES reports from non-local hosts. Always include the square brackets. See https://www.elastic.co/guide/en/elasticsearch/reference/2.3/modules-network.html for ES documentation.</description>
-    </property>
-    <property>
-        <name>network_publish_host</name>
-        <value>[]</value>
-        <value-attributes>
-            <empty-value-valid>true</empty-value-valid>
-        </value-attributes>
-        <description>Network address ES will publish for client and peer use. Empty value causes it to pick from the values in network_host, which works in most simple environments. MUST set explicitly for MULTI-HOMED SYSTEMS. See https://www.elastic.co/guide/en/elasticsearch/reference/2.3/modules-network.html for ES documentation.</description>
-    </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-sysconfig.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-sysconfig.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-sysconfig.xml
deleted file mode 100755
index ea6ca38..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-sysconfig.xml
+++ /dev/null
@@ -1,97 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<configuration>
-    <property>
-        <name>elastic_home</name>
-        <value>/usr/share/elasticsearch/</value>
-        <description>Elasticsearch Home Directory</description>
-    </property>
-    <property>
-        <name>data_dir</name>
-        <value>/var/lib/elasticsearch/</value>
-        <description>Elasticsearch Data Directory</description>
-    </property>
-    <property>
-        <name>work_dir</name>
-        <value>/tmp/elasticsearch/</value>
-        <description>Elasticsearch Work Directory</description>
-    </property>
-    <property>
-        <name>conf_dir</name>
-        <value>/etc/elasticsearch/</value>
-        <description>Elasticsearch Configuration Directory</description>
-    </property>
-    <property>
-        <name>max_open_files</name>
-        <value>65536</value>
-        <description>Maximum number of open files</description>
-    </property>
-    <property>
-        <name>max_map_count</name>
-        <value>262144</value>
-        <description>Maximum number of memory map areas for process</description>
-    </property>
-
-    <!-- Elasticsearch sysconfig -->
-    <property>
-        <name>content</name>
-        <description>This is the jinja template for elastic sysconfig file</description>
-        <value>
-# Directory where the Elasticsearch binary distribution resides
-ES_HOME={{elastic_home}}
-
-# Maximum number of open files
-MAX_OPEN_FILES={{max_open_files}}
-
-# Maximum number of VMA (Virtual Memory Areas) a process can own
-MAX_MAP_COUNT={{max_map_count}}
-
-# Elasticsearch log directory
-LOG_DIR={{log_dir}}
-
-# Elasticsearch data directory
-DATA_DIR={{data_dir}}
-
-# Elasticsearch work directory
-WORK_DIR={{work_dir}}
-
-# Elasticsearch conf directory
-CONF_DIR={{conf_dir}}
-
-# User to run as, change this to a specific elasticsearch user if possible
-# Also make sure, this user can write into the log directories in case you change them
-# This setting only works for the init script, but has to be configured separately for systemd startup
-ES_USER={{elastic_user}}
-
-# Elasticsearch pid directory
-PID_DIR={{pid_dir}}
-
-# JAVA_HOME must be provided here for OS that use systemd service launch
-JAVA_HOME={{java64_home}}
-
-# Additional Java options - now preferential to use 'jvm.options' file instead
-ES_JAVA_OPTS=""
-
-# https://www.elastic.co/guide/en/elasticsearch/reference/5.6/_memory_lock_check.html
-MAX_LOCKED_MEMORY=unlimited
-        </value>
-    </property>
-</configuration>

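The sysconfig "content" property above is a Jinja template; Ambari renders it with values drawn from the other elastic-sysconfig properties and writes the result to /etc/sysconfig/elasticsearch or /etc/default/elasticsearch (see create_elastic_config further down in this commit). A rough sketch of that substitution step, using the jinja2 library directly rather than Ambari's InlineTemplate wrapper, with made-up values:

    from jinja2 import Template

    # a fragment of the sysconfig template above, with two of its variables
    sysconfig_template = "ES_HOME={{elastic_home}}\nMAX_OPEN_FILES={{max_open_files}}\n"

    rendered = Template(sysconfig_template).render(
        elastic_home="/usr/share/elasticsearch/",   # elastic-sysconfig defaults
        max_open_files=65536)

    with open("/etc/sysconfig/elasticsearch", "w") as f:
        f.write(rendered)
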
http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-systemd.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-systemd.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-systemd.xml
deleted file mode 100644
index 311e3c0..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/configuration/elastic-systemd.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<configuration>
-    <property>
-        <name>content</name>
-        <description>The jinja template for the Elasticsearch systemd override file.  Applies only to platforms that use systemd.</description>
-        <value>
-[Service]
-LimitMEMLOCK=infinity
-        </value>
-    </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/metainfo.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/metainfo.xml
deleted file mode 100755
index 47abb45..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/metainfo.xml
+++ /dev/null
@@ -1,97 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <schemaVersion>2.0</schemaVersion>
-    <services>
-        <service>
-            <name>ELASTICSEARCH</name>
-            <displayName>Elasticsearch</displayName>
-            <comment>Indexing and Search</comment>
-            <version>5.6.2</version>
-            <components>
-                <component>
-                    <name>ES_MASTER</name>
-                    <displayName>Elasticsearch Master</displayName>
-                    <category>MASTER</category>
-                    <cardinality>1+</cardinality>
-                    <commandScript>
-                        <script>scripts/elastic_master.py</script>
-                        <scriptType>PYTHON</scriptType>
-                        <timeout>600</timeout>
-                    </commandScript>
-                </component>
-                <component>
-                    <name>ES_SLAVE</name>
-                    <displayName>Elasticsearch Data Node</displayName>
-                    <category>SLAVE</category>
-                    <cardinality>0+</cardinality>
-                    <commandScript>
-                        <script>scripts/elastic_slave.py</script>
-                        <scriptType>PYTHON</scriptType>
-                        <timeout>600</timeout>
-                    </commandScript>
-                </component>
-            </components>
-            <osSpecifics>
-                <osSpecific>
-                    <osFamily>redhat6</osFamily>
-                    <packages>
-                        <package>
-                            <name>elasticsearch-5.6.2</name>
-                        </package>
-                    </packages>
-                </osSpecific>
-                <osSpecific>
-                    <osFamily>redhat7</osFamily>
-                    <packages>
-                        <package>
-                            <name>elasticsearch-5.6.2</name>
-                        </package>
-                    </packages>
-                </osSpecific>
-                <osSpecific>
-                    <osFamily>ubuntu14</osFamily>
-                    <packages>
-                        <package>
-                            <name>elasticsearch=5.6.2</name>
-                        </package>
-                    </packages>
-                </osSpecific>
-            </osSpecifics>
-            <commandScript>
-                <script>scripts/service_check.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>300</timeout>
-            </commandScript>
-            <configuration-dependencies>
-                <config-type>elastic-env</config-type>
-                <config-type>elastic-site</config-type>
-                <config-type>elastic-sysconfig</config-type>
-                <config-type>elastic-systemd</config-type>
-                <config-type>elastic-jvm-options</config-type>
-            </configuration-dependencies>
-            <restartRequiredAfterChange>true</restartRequiredAfterChange>
-            <quickLinksConfigurations>
-                <quickLinksConfiguration>
-                    <fileName>quicklinks.json</fileName>
-                    <default>true</default>
-                </quickLinksConfiguration>
-            </quickLinksConfigurations>
-        </service>
-    </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_commands.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_commands.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_commands.py
deleted file mode 100644
index 618d10a..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_commands.py
+++ /dev/null
@@ -1,266 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from ambari_commons.os_check import OSCheck
-from resource_management.core.exceptions import ExecutionFailed
-from resource_management.core.exceptions import ComponentIsNotRunning
-from resource_management.core.resources.system import Execute
-from resource_management.core.resources.system import Directory
-from resource_management.core.resources.system import File
-from resource_management.core.source import InlineTemplate
-from resource_management.core.source import Template
-from resource_management.core.resources import User
-from resource_management.core.logger import Logger
-from resource_management.libraries.functions import format as ambari_format
-from resource_management.libraries.functions.get_user_call_output import get_user_call_output
-
-
-def service_check(cmd, user, label):
-    """
-    Executes a SysV service check command that adheres to LSB-compliant
-    return codes.  The return codes are interpreted as defined
-    by the LSB.
-
-    See http://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/iniscrptact.html
-    for more information.
-
-    :param cmd: The service check command to execute.
-    :param user: The user to run the service check as.
-    :param label: The name of the service.
-    """
-    Logger.info("Performing service check; cmd={0}, user={1}, label={2}".format(cmd, user, label))
-    rc, out, err = get_user_call_output(cmd, user, is_checked_call=False)
-
-    if rc in [1, 2, 3]:
-      # if return code in [1, 2, 3], then 'program is not running' or 'program is dead'
-      Logger.info("{0} is not running".format(label))
-      raise ComponentIsNotRunning()
-
-    elif rc == 0:
-      # if return code = 0, then 'program is running or service is OK'
-      Logger.info("{0} is running".format(label))
-
-    else:
-      # else service state is unknown
-      err_msg = "{0} service check failed; cmd '{1}' returned {2}".format(label, cmd, rc)
-      Logger.error(err_msg)
-      raise ExecutionFailed(err_msg, rc, out, err)
-
-def is_systemd_running():
-    """
-    Determines if the platform is running Systemd.
-    :return True, if the platform is running Systemd.  False, otherwise.
-    """
-    Logger.info("Is the platform running Systemd?")
-    rc, out, err = get_user_call_output("pidof systemd", "root", is_checked_call=False)
-    if rc == 0:
-        Logger.info("Systemd was found")
-        return True
-    else:
-        Logger.info("Systemd was NOT found")
-        return False
-
-def configure_systemd(params):
-    """
-    Configure Systemd for Elasticsearch.
-    """
-    Logger.info("Configuring Systemd for Elasticsearch");
-
-    # ensure the systemd directory for elasticsearch overrides exists
-    Logger.info("Create Systemd directory for Elasticsearch: {0}".format(params.systemd_elasticsearch_dir))
-    Directory(params.systemd_elasticsearch_dir,
-              create_parents=True,
-              owner='root',
-              group='root')
-
-    # when using Elasticsearch packages on systems that use systemd, system
-    # limits must also be specified via systemd.
-    # see https://www.elastic.co/guide/en/elasticsearch/reference/5.6/setting-system-settings.html#systemd
-    Logger.info("Elasticsearch systemd limits: {0}".format(params.systemd_override_file))
-    File(params.systemd_override_file,
-         content=InlineTemplate(params.systemd_override_template),
-         owner="root",
-         group="root")
-
-    # reload the configuration
-    Execute("systemctl daemon-reload")
-
-def create_user(params):
-    """
-    Creates the user required for Elasticsearch.
-    """
-    Logger.info("Creating user={0} in group={1}".format(params.elastic_user, params.elastic_group))
-    User(params.elastic_user, action = "create", groups = params.elastic_group)
-
-def create_directories(params, directories):
-    """
-    Creates one or more directories.
-    """
-    Logger.info("Creating directories: {0}".format(directories))
-    Directory(directories,
-              create_parents=True,
-              mode=0755,
-              owner=params.elastic_user,
-              group=params.elastic_group
-              )
-
-def create_elastic_env(params):
-    """
-    Creates the Elasticsearch environment file.
-    """
-    Logger.info("Create Elasticsearch environment file.")
-    File("{0}/elastic-env.sh".format(params.conf_dir),
-         owner=params.elastic_user,
-         group=params.elastic_group,
-         content=InlineTemplate(params.elastic_env_sh_template))
-
-def create_elastic_site(params, template_name):
-    """
-    Creates the Elasticsearch site file.
-    """
-    Logger.info("Creating Elasticsearch site file; template={0}".format(template_name))
-
-    elastic_site = params.config['configurations']['elastic-site']
-    path = "{0}/elasticsearch.yml".format(params.conf_dir)
-    template = Template(template_name, configurations=elastic_site)
-    File(path,
-         content=template,
-         owner=params.elastic_user,
-         group=params.elastic_group)
-
-def get_elastic_config_path(default="/etc/default/elasticsearch"):
-    """
-    Defines the path to the Elasticsearch environment file.  This path will
-    differ based on the OS family.
-    :param default: The path used if the OS family is not recognized.
-    """
-    path = default
-    if OSCheck.is_redhat_family():
-      path = "/etc/sysconfig/elasticsearch"
-    elif OSCheck.is_ubuntu_family():
-      path = "/etc/default/elasticsearch"
-    else:
-      Logger.error("Unexpected OS family; using default path={0}".format(path))
-
-    return path
-
-def create_elastic_config(params):
-    """
-    Creates the Elasticsearch system config file.  Usually lands at either
-    /etc/sysconfig/elasticsearch or /etc/default/elasticsearch.
-    """
-    path = get_elastic_config_path()
-    Logger.info("Creating the Elasticsearch system config; path={0}".format(path))
-    File(path, owner="root", group="root", content=InlineTemplate(params.sysconfig_template))
-
-def create_elastic_pam_limits(params):
-    """
-    Creates the PAM limits for Elasticsearch.
-    """
-    Logger.info("Creating Elasticsearch PAM limits.")
-
-    # in some OS this folder may not exist, so create it
-    Logger.info("Ensure PAM limits directory exists: {0}".format(params.limits_conf_dir))
-    Directory(params.limits_conf_dir,
-              create_parents=True,
-              owner='root',
-              group='root')
-
-    Logger.info("Creating Elasticsearch PAM limits; file={0}".format(params.limits_conf_file))
-    File(params.limits_conf_file,
-         content=Template('elasticsearch_limits.conf.j2'),
-         owner="root",
-         group="root")
-
-def create_elastic_jvm_options(params):
-    """
-    Defines the jvm.options file used to specify JVM options.
-    """
-    path = "{0}/jvm.options".format(params.conf_dir)
-    Logger.info("Creating Elasticsearch JVM Options; file={0}".format(path))
-    File(path,
-         content=InlineTemplate(params.jvm_options_template),
-         owner=params.elastic_user,
-         group=params.elastic_group)
-
-def get_data_directories(params):
-    """
-    Returns the directories to use for storing Elasticsearch data.
-    """
-    path = params.path_data
-    path = path.replace('"', '')
-    path = path.replace(' ', '')
-    path = path.split(',')
-    dirs = [p.replace('"', '') for p in path]
-
-    Logger.info("Elasticsearch data directories: dirs={0}".format(dirs))
-    return dirs
-
-def configure_master():
-    """
-    Configures the Elasticsearch master node.
-    """
-    import params
-
-    # define the directories required
-    dirs = [
-      params.log_dir,
-      params.pid_dir,
-      params.conf_dir,
-      "{0}/scripts".format(params.conf_dir)
-    ]
-    dirs += get_data_directories(params)
-
-    # configure the elasticsearch master
-    create_user(params)
-    create_directories(params, dirs)
-    create_elastic_env(params)
-    create_elastic_site(params,  "elasticsearch.master.yaml.j2")
-    create_elastic_config(params)
-    create_elastic_pam_limits(params)
-    create_elastic_jvm_options(params)
-    if is_systemd_running():
-        configure_systemd(params)
-
-def configure_slave():
-    """
-    Configures the Elasticsearch slave node.
-    """
-    import params
-
-    # define the directories required
-    dirs = [
-      params.log_dir,
-      params.pid_dir,
-      params.conf_dir,
-    ]
-    dirs += get_data_directories(params)
-
-    # configure the elasticsearch slave
-    create_user(params)
-    create_directories(params, dirs)
-    create_elastic_env(params)
-    create_elastic_site(params, "elasticsearch.slave.yaml.j2")
-    create_elastic_config(params)
-    create_elastic_pam_limits(params)
-    create_elastic_jvm_options(params)
-    if is_systemd_running():
-        configure_systemd(params)

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_master.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_master.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_master.py
deleted file mode 100755
index 142ce4e..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_master.py
+++ /dev/null
@@ -1,72 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.core import shell
-from resource_management.core.resources.system import Execute
-from resource_management.libraries.script import Script
-from resource_management.core.logger import Logger
-from elastic_commands import service_check
-from elastic_commands import configure_master
-
-class Elasticsearch(Script):
-
-    def install(self, env):
-        import params
-        env.set_params(params)
-        Logger.info('Install Elasticsearch master node')
-        self.install_packages(env)
-
-    def configure(self, env, upgrade_type=None, config_dir=None):
-        import params
-        env.set_params(params)
-        Logger.info('Configure Elasticsearch master node')
-        configure_master()
-
-    def stop(self, env, upgrade_type=None):
-        import params
-        env.set_params(params)
-        Logger.info('Stop Elasticsearch master node')
-        Execute("service elasticsearch stop")
-
-    def start(self, env, upgrade_type=None):
-        import params
-        env.set_params(params)
-        Logger.info('Start Elasticsearch master node')
-        self.configure(env)
-        Execute("service elasticsearch start")
-
-    def status(self, env):
-        import params
-        env.set_params(params)
-        Logger.info('Status check Elasticsearch master node')
-        service_check(
-          cmd="service elasticsearch status",
-          user=params.elastic_status_check_user,
-          label="Elasticsearch Master")
-
-    def restart(self, env):
-        import params
-        env.set_params(params)
-        Logger.info('Restart Elasticsearch master node')
-        self.configure(env)
-        Execute("service elasticsearch restart")
-
-
-if __name__ == "__main__":
-    Elasticsearch().execute()

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_slave.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_slave.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_slave.py
deleted file mode 100755
index 2d559ff..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/elastic_slave.py
+++ /dev/null
@@ -1,71 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Execute
-from resource_management.libraries.script import Script
-
-from elastic_commands import service_check
-from elastic_commands import configure_slave
-
-class Elasticsearch(Script):
-
-    def install(self, env):
-        import params
-        env.set_params(params)
-        Logger.info('Install Elasticsearch slave node')
-        self.install_packages(env)
-
-    def configure(self, env, upgrade_type=None, config_dir=None):
-        import params
-        env.set_params(params)
-        Logger.info('Configure Elasticsearch slave node')
-        configure_slave()
-
-    def stop(self, env, upgrade_type=None):
-        import params
-        env.set_params(params)
-        Logger.info('Stop Elasticsearch slave node')
-        Execute("service elasticsearch stop")
-
-    def start(self, env, upgrade_type=None):
-        import params
-        env.set_params(params)
-        Logger.info('Start Elasticsearch slave node')
-        self.configure(env)
-        Execute("service elasticsearch start")
-
-    def status(self, env):
-        import params
-        env.set_params(params)
-        Logger.info('Status check Elasticsearch slave node')
-        service_check(
-          cmd="service elasticsearch status",
-          user=params.elastic_status_check_user,
-          label="Elasticsearch Slave")
-
-    def restart(self, env):
-        import params
-        env.set_params(params)
-        Logger.info('Restart Elasticsearch slave node')
-        self.configure(env)
-        Execute("service elasticsearch restart")
-
-
-if __name__ == "__main__":
-    Elasticsearch().execute()

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/params.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/params.py
deleted file mode 100755
index 24f2306..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/params.py
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.script import Script
-
-def yamlify_variables(var):
-  if isinstance(var, bool):
-    return str(var).lower()
-  else:
-    return var
-
-# server configurations
-config = Script.get_config()
-
-masters_also_are_datanodes = config['configurations']['elastic-site']['masters_also_are_datanodes']
-elastic_home = config['configurations']['elastic-sysconfig']['elastic_home']
-data_dir = config['configurations']['elastic-sysconfig']['data_dir']
-work_dir = config['configurations']['elastic-sysconfig']['work_dir']
-conf_dir = config['configurations']['elastic-sysconfig']['conf_dir']
-heap_size = config['configurations']['elastic-sysconfig']['heap_size']
-max_open_files = config['configurations']['elastic-sysconfig']['max_open_files']
-max_map_count = config['configurations']['elastic-sysconfig']['max_map_count']
-
-elastic_user = config['configurations']['elastic-env']['elastic_user']
-elastic_group = config['configurations']['elastic-env']['elastic_group']
-log_dir = config['configurations']['elastic-env']['elastic_log_dir']
-pid_dir = config['configurations']['elastic-env']['elastic_pid_dir']
-
-hostname = config['hostname']
-java64_home = config['hostLevelParams']['java_home']
-elastic_env_sh_template = config['configurations']['elastic-env']['content']
-sysconfig_template = config['configurations']['elastic-sysconfig']['content']
-
-cluster_name = config['configurations']['elastic-site']['cluster_name']
-zen_discovery_ping_unicast_hosts = config['configurations']['elastic-site']['zen_discovery_ping_unicast_hosts']
-
-path_data = config['configurations']['elastic-site']['path_data']
-http_cors_enabled = config['configurations']['elastic-site']['http_cors_enabled']
-http_port = config['configurations']['elastic-site']['http_port']
-transport_tcp_port = config['configurations']['elastic-site']['transport_tcp_port']
-
-recover_after_time = config['configurations']['elastic-site']['recover_after_time']
-gateway_recover_after_data_nodes = config['configurations']['elastic-site']['gateway_recover_after_data_nodes']
-expected_data_nodes = config['configurations']['elastic-site']['expected_data_nodes']
-index_merge_scheduler_max_thread_count = config['configurations']['elastic-site']['index_merge_scheduler_max_thread_count']
-index_translog_flush_threshold_size = config['configurations']['elastic-site']['index_translog_flush_threshold_size']
-index_refresh_interval = config['configurations']['elastic-site']['index_refresh_interval']
-indices_memory_index_store_throttle_type = config['configurations']['elastic-site']['indices_memory_index_store_throttle_type']
-index_number_of_shards = config['configurations']['elastic-site']['index_number_of_shards']
-index_number_of_replicas = config['configurations']['elastic-site']['index_number_of_replicas']
-indices_memory_index_buffer_size = config['configurations']['elastic-site']['indices_memory_index_buffer_size']
-bootstrap_memory_lock = yamlify_variables(config['configurations']['elastic-site']['bootstrap_memory_lock'])
-threadpool_bulk_queue_size = config['configurations']['elastic-site']['threadpool_bulk_queue_size']
-cluster_routing_allocation_node_concurrent_recoveries = config['configurations']['elastic-site']['cluster_routing_allocation_node_concurrent_recoveries']
-cluster_routing_allocation_disk_watermark_low = config['configurations']['elastic-site']['cluster_routing_allocation_disk_watermark_low']
-cluster_routing_allocation_disk_threshold_enabled = yamlify_variables(config['configurations']['elastic-site']['cluster_routing_allocation_disk_threshold_enabled'])
-cluster_routing_allocation_disk_watermark_high = config['configurations']['elastic-site']['cluster_routing_allocation_disk_watermark_high']
-indices_fielddata_cache_size = config['configurations']['elastic-site']['indices_fielddata_cache_size']
-indices_cluster_send_refresh_mapping = yamlify_variables(config['configurations']['elastic-site']['indices_cluster_send_refresh_mapping'])
-threadpool_index_queue_size = config['configurations']['elastic-site']['threadpool_index_queue_size']
-
-discovery_zen_ping_timeout = config['configurations']['elastic-site']['discovery_zen_ping_timeout']
-discovery_zen_fd_ping_interval = config['configurations']['elastic-site']['discovery_zen_fd_ping_interval']
-discovery_zen_fd_ping_timeout = config['configurations']['elastic-site']['discovery_zen_fd_ping_timeout']
-discovery_zen_fd_ping_retries = config['configurations']['elastic-site']['discovery_zen_fd_ping_retries']
-
-network_host = config['configurations']['elastic-site']['network_host']
-network_publish_host = config['configurations']['elastic-site']['network_publish_host']
-
-limits_conf_dir = "/etc/security/limits.d"
-limits_conf_file = limits_conf_dir + "/elasticsearch.conf"
-elastic_user_nofile_limit = config['configurations']['elastic-env']['elastic_user_nofile_limit']
-elastic_user_nproc_limit = config['configurations']['elastic-env']['elastic_user_nproc_limit']
-elastic_user_memlock_soft_limit = config['configurations']['elastic-env']['elastic_user_memlock_soft_limit']
-elastic_user_memlock_hard_limit = config['configurations']['elastic-env']['elastic_user_memlock_hard_limit']
-
-# the status check (service elasticsearch status) cannot be run by the 'elasticsearch'
-# user due to the default permissions that are set when the package is installed.  the
-# status check must be run as root
-elastic_status_check_user = 'root'
-
-# when using the RPM or Debian packages on systems that use systemd, system limits
-# must be specified via systemd.
-# see https://www.elastic.co/guide/en/elasticsearch/reference/5.6/setting-system-settings.html#systemd
-systemd_parent_dir = '/etc/systemd/system/'
-systemd_elasticsearch_dir = systemd_parent_dir + 'elasticsearch.service.d/'
-systemd_override_file = systemd_elasticsearch_dir + 'override.conf'
-systemd_override_template = config['configurations']['elastic-systemd']['content']
-
-heap_size = config['configurations']['elastic-jvm-options']['heap_size']
-jvm_options_template = config['configurations']['elastic-jvm-options']['content']

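params.py above is essentially a flattening layer: Script.get_config() hands back a nested dictionary keyed by config-type, and each module-level variable plucks a single value out of it. A simplified sketch of the dictionary shape being read (the hostnames and values below are made up):

    # simplified shape of the dictionary returned by Script.get_config()
    config = {
        'hostname': 'es-master-1.example.com',
        'hostLevelParams': {'java_home': '/usr/jdk64/jdk1.8.0_77'},
        'configurations': {
            'elastic-site': {'cluster_name': 'metron', 'http_port': '9200-9300'},
            'elastic-env':  {'elastic_user': 'elasticsearch'},
        },
    }

    # params.py repeats this lookup pattern once per setting
    cluster_name = config['configurations']['elastic-site']['cluster_name']
    http_port = config['configurations']['elastic-site']['http_port']
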
http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/properties_config.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/properties_config.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/properties_config.py
deleted file mode 100755
index ef9f6dd..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/properties_config.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.core.resources.system import File
-from resource_management.core.source import InlineTemplate
-from resource_management.libraries.functions import format
-
-
-def properties_inline_template(configurations):
-    return InlineTemplate('''{% for key, value in configurations_dict.items() %}{{ key }}={{ value }}
-{% endfor %}''', configurations_dict=configurations)
-
-
-def properties_config(filename, configurations=None, conf_dir=None,
-                      mode=None, owner=None, group=None, brokerid=None):
-    config_content = properties_inline_template(configurations)
-    File(format("{conf_dir}/{filename}"), content=config_content, owner=owner,
-         group=group, mode=mode)

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/service_check.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/service_check.py
deleted file mode 100755
index 3ac7c83..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/service_check.py
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from __future__ import print_function
-
-import subprocess
-import sys
-import re
-
-from resource_management.core.resources.system import Execute
-from resource_management.libraries.script import Script
-from resource_management.core.logger import Logger
-
-class ServiceCheck(Script):
-
-    def service_check(self, env):
-        import params
-        env.set_params(params)
-        Logger.info("Running Elasticsearch service check")
-
-        port = self.get_port_from_range(params.http_port)
-        self.check_cluster_health(params.hostname, port)
-        self.index_document(params.hostname, port)
-
-        Logger.info("Elasticsearch service check successful")
-        sys.exit(0)
-
-    def index_document(self, host, port, doc='{"name": "Ambari Service Check"}', index="ambari_service_check"):
-        """
-        Tests the health of Elasticsearch by indexing a document.
-
-        :param host: The name of a host running Elasticsearch.
-        :param port: The Elasticsearch HTTP port.
-        :param doc: The test document to put.
-        :param index: The name of the test index.
-        """
-        # put a document into a new index
-        Execute("curl -XPUT 'http://%s:%s/%s/test/1' -d '%s'" % (host, port, index, doc), logoutput=True)
-
-        # retrieve the document...  use subprocess because we actually need the results here.
-        cmd_retrieve = "curl -XGET 'http://%s:%s/%s/test/1'" % (host, port, index)
-        proc = subprocess.Popen(cmd_retrieve, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
-        (stdout, stderr) = proc.communicate()
-        response_retrieve = stdout
-        Logger.info("Retrieval response is: %s" % response_retrieve)
-        expected_retrieve = '{"_index":"%s","_type":"test","_id":"1","_version":1,"found":true,"_source":%s}' \
-            % (index, doc)
-
-        # delete the test index
-        cmd_delete = "curl -XDELETE 'http://%s:%s/%s'" % (host, port, index)
-        proc = subprocess.Popen(cmd_delete, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
-        (stdout, stderr) = proc.communicate()
-        response_delete = stdout
-        Logger.info("Delete index response is: %s" % response_retrieve)
-        expected_delete = '{"acknowledged":true}'
-
-        if (expected_retrieve == response_retrieve) and (expected_delete == response_delete):
-            Logger.info("Successfully indexed document in Elasticsearch")
-        else:
-            Logger.info("Unable to retrieve document from Elasticsearch")
-            sys.exit(1)
-
-    def check_cluster_health(self, host, port, status="green", timeout="120s"):
-        """
-        Checks Elasticsearch cluster health.  Will wait for a given health
-        state to be reached.
-
-        :param host: The name of a host running Elasticsearch.
-        :param port: The Elasticsearch HTTP port.
-        :param status: The expected cluster health state.  By default, green.
-        :param timeout: How long to wait for the cluster.  By default, 120 seconds.
-        """
-        Logger.info("Checking cluster health")
-
-        cmd = "curl -sS -XGET 'http://{0}:{1}/_cluster/health?wait_for_status={2}&timeout={3}' | grep '\"status\":\"{2}\"'"
-        Execute(cmd.format(host, port, status, timeout), logoutput=True, tries=5, try_sleep=10)
-
-    def get_port_from_range(self, port_range, delimiter="-", default="9200"):
-        """
-        Elasticsearch is configured with a range of ports to bind to, such as
-        9200-9300.  This function identifies a single port within the given range.
-
-        :param port_range: A range of ports that Elasticsearch binds to.
-        :param delimiter: The port range delimiter, by default "-".
-        :param default: If no port can be identified in the port_range, the default is returned.
-        :return A single port within the given range.
-        """
-        port = default
-        if delimiter in port_range:
-            ports = port_range.split(delimiter)
-            if len(ports) > 0:
-                port = ports[0]
-
-        return port
-
-
-if __name__ == "__main__":
-    ServiceCheck().execute()

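The service check above shells out to curl, but its three steps (pick one port from the configured range, wait for the cluster to reach the desired health, then round-trip a test document) are easy to reproduce outside Ambari. A hedged sketch using the requests library against a hypothetical local node, with the ES 5.x type-based URLs used in the script above:

    import requests

    host, port_range = "localhost", "9200-9300"
    # same convention as get_port_from_range: take the low end of the range
    port = port_range.split("-")[0] if "-" in port_range else port_range
    base = "http://%s:%s" % (host, port)

    # 1. wait (up to 120s) for the cluster to reach green
    health = requests.get("%s/_cluster/health" % base,
                          params={"wait_for_status": "green",
                                  "timeout": "120s"}).json()
    assert health["status"] == "green", health

    # 2. index a test document and read it back (GET by id is realtime)
    doc_url = "%s/ambari_service_check/test/1" % base
    requests.put(doc_url, json={"name": "Ambari Service Check"})
    assert requests.get(doc_url).json().get("found") is True

    # 3. clean up the test index
    requests.delete("%s/ambari_service_check" % base)
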
http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/status_params.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/status_params.py
deleted file mode 100755
index 0629735..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/scripts/status_params.py
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.script import Script
-
-config = Script.get_config()
-
-elastic_pid_dir = config['configurations']['elastic-env']['elastic_pid_dir']
-elastic_pid_file = format("{elastic_pid_dir}/elasticsearch.pid")
-elastic_user = config['configurations']['elastic-env']['elastic_user']

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch.master.yaml.j2
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch.master.yaml.j2 b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch.master.yaml.j2
deleted file mode 100755
index 8e20ba2..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch.master.yaml.j2
+++ /dev/null
@@ -1,77 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-cluster:
-  name:   {{cluster_name}} 
-  routing:
-    allocation.node_concurrent_recoveries: {{cluster_routing_allocation_node_concurrent_recoveries}}
-    allocation.disk.watermark.low: {{cluster_routing_allocation_disk_watermark_low}}
-    allocation.disk.threshold_enabled: {{cluster_routing_allocation_disk_threshold_enabled}}
-    allocation.disk.watermark.high: {{cluster_routing_allocation_disk_watermark_high}}
-
-discovery:
-  zen:
-    ping:
-      unicast:
-        hosts: {{zen_discovery_ping_unicast_hosts}}
-
-node:
-  data: {{ masters_also_are_datanodes }}
-  master: true
-  name: {{hostname}}
-path:
-  data: {{path_data}}
-
-http:
-  port: {{http_port}}
-  cors.enabled: {{http_cors_enabled}}
-
-
-transport:
-  tcp:
-    port: {{transport_tcp_port}}
-
-gateway:
-  recover_after_data_nodes: {{gateway_recover_after_data_nodes}}
-  recover_after_time: {{recover_after_time}}
-  expected_data_nodes: {{expected_data_nodes}}
-# https://www.elastic.co/guide/en/elasticsearch/guide/current/indexing-performance.html
-indices:
-  store.throttle.type: {{indices_memory_index_store_throttle_type}}
-  memory:
-   index_buffer_size: {{indices_memory_index_buffer_size}}
-  fielddata:
-   cache.size: {{indices_fielddata_cache_size}}
-
-bootstrap:
-  memory_lock: {{bootstrap_memory_lock}}
-  system_call_filter: false
-
-thread_pool:
-  bulk:
-    queue_size: {{threadpool_bulk_queue_size}}
-  index:
-    queue_size: {{threadpool_index_queue_size}}
-
-discovery.zen.ping_timeout: {{discovery_zen_ping_timeout}}
-discovery.zen.fd.ping_interval: {{discovery_zen_fd_ping_interval}}
-discovery.zen.fd.ping_timeout: {{discovery_zen_fd_ping_timeout}}
-discovery.zen.fd.ping_retries: {{discovery_zen_fd_ping_retries}}
-
-network.host: {{network_host}}
-network.publish_host: {{network_publish_host}}

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch.slave.yaml.j2
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch.slave.yaml.j2 b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch.slave.yaml.j2
deleted file mode 100755
index 6bf8399..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch.slave.yaml.j2
+++ /dev/null
@@ -1,78 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-cluster:
-  name:   {{cluster_name}} 
-  routing:
-    allocation.node_concurrent_recoveries: {{cluster_routing_allocation_node_concurrent_recoveries}}
-    allocation.disk.watermark.low: {{cluster_routing_allocation_disk_watermark_low}}
-    allocation.disk.threshold_enabled: {{cluster_routing_allocation_disk_threshold_enabled}}
-    allocation.disk.watermark.high: {{cluster_routing_allocation_disk_watermark_high}}
-
-discovery:
-  zen:
-    ping:
-      unicast:
-        hosts: {{zen_discovery_ping_unicast_hosts}}
-
-node:
-  data: true
-  master: false
-  name: {{hostname}}
-path:
-  data: {{path_data}}
-
-http:
-  port: {{http_port}}
-  cors.enabled: {{http_cors_enabled}}
-
-
-transport:
-  tcp:
-    port: {{transport_tcp_port}}
-
-gateway:
-  recover_after_data_nodes: {{gateway_recover_after_data_nodes}}
-  recover_after_time: {{recover_after_time}}
-  expected_data_nodes: {{expected_data_nodes}}
-
-# https://www.elastic.co/guide/en/elasticsearch/guide/current/indexing-performance.html
-indices:
-  store.throttle.type: {{indices_memory_index_store_throttle_type}}
-  memory:
-   index_buffer_size: {{indices_memory_index_buffer_size}}
-  fielddata:
-   cache.size: {{indices_fielddata_cache_size}}
-
-bootstrap:
-  memory_lock: {{bootstrap_memory_lock}}
-  system_call_filter: false
-
-thread_pool:
-  bulk:
-    queue_size: {{threadpool_bulk_queue_size}}
-  index:
-    queue_size: {{threadpool_index_queue_size}}
-
-discovery.zen.ping_timeout: {{discovery_zen_ping_timeout}}
-discovery.zen.fd.ping_interval: {{discovery_zen_fd_ping_interval}}
-discovery.zen.fd.ping_timeout: {{discovery_zen_fd_ping_timeout}}
-discovery.zen.fd.ping_retries: {{discovery_zen_fd_ping_retries}}
-
-network.host: {{network_host}}
-network.publish_host: {{network_publish_host}}

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch_limits.conf.j2
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch_limits.conf.j2 b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch_limits.conf.j2
deleted file mode 100644
index 99f72e1..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/package/templates/elasticsearch_limits.conf.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-#  Licensed to the Apache Software Foundation (ASF) under one
-#  or more contributor license agreements.  See the NOTICE file
-#  distributed with this work for additional information
-#  regarding copyright ownership.  The ASF licenses this file
-#  to you under the Apache License, Version 2.0 (the
-#  "License"); you may not use this file except in compliance
-#  with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-
-{{elastic_user}}	-	nproc  {{elastic_user_nproc_limit}}
-{{elastic_user}}	-	nofile {{elastic_user_nofile_limit}}
-{{elastic_user}}	soft	memlock	{{elastic_user_memlock_soft_limit}}
-{{elastic_user}}	hard	memlock	{{elastic_user_memlock_hard_limit}}

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/quicklinks/quicklinks.json b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/quicklinks/quicklinks.json
deleted file mode 100644
index 909828b..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/quicklinks/quicklinks.json
+++ /dev/null
@@ -1,43 +0,0 @@
-{
-  "name": "default",
-  "description": "default quick links configuration",
-  "configuration": {
-    "protocol":
-    {
-      "type":"HTTP_ONLY"
-    },
-
-    "links": [
-      {
-        "name": "es_health_link",
-        "label": "Elasticsearch Health",
-        "requires_user_name": "false",
-        "component_name": "ES_MASTER",
-        "url":"%@://%@:%@/_cat/health?v",
-        "port":{
-          "http_property": "http_port",
-          "http_default_port": "9200",
-          "https_property": "http_port",
-          "https_default_port": "9200",
-          "regex": "^(\\d+)",
-          "site": "elastic-site"
-        }
-      },
-      {
-        "name": "es_indices_link",
-        "label": "Elasticsearch Indexes",
-        "requires_user_name": "false",
-        "component_name": "ES_MASTER",
-        "url":"%@://%@:%@/_cat/indices?v",
-        "port":{
-          "http_property": "http_port",
-          "http_default_port": "9200",
-          "https_property": "http_port",
-          "https_default_port": "9200",
-          "regex": "^(\\d+)",
-          "site": "elastic-site"
-        }
-      }
-    ]
-  }
-}

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/role_command_order.json
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/role_command_order.json b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/role_command_order.json
deleted file mode 100755
index 130d018..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/ELASTICSEARCH/5.6.2/role_command_order.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-  "_comment" : "Record format:",
-  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
-  "general_deps" : {
-    "_comment" : "dependencies for all cases",
-    "ELASTICSEARCH_SERVICE_CHECK-SERVICE_CHECK" : ["ES_MASTER-START", "ES_SLAVE-START"]
-  }
-}

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/configuration/kibana-env.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/configuration/kibana-env.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/configuration/kibana-env.xml
deleted file mode 100755
index 1246405..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/configuration/kibana-env.xml
+++ /dev/null
@@ -1,72 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>kibana_user</name>
-    <value>kibana</value>
-    <property-type>USER</property-type>
-    <description>Service User for Kibana</description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-  </property>
-  <property>
-    <name>kabana_group</name>
-    <value>kibana</value>
-    <property-type>GROUP</property-type>
-    <description>Service Group for Kibana</description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-  </property>
-  <property require-input="true">
-    <name>kibana_server_host</name>
-    <value>0.0.0.0</value>
-    <description>Host name or IP address that Kibana should bind to.</description>
-  </property>
-  <property require-input="true">
-    <name>kibana_log_dir</name>
-    <value>/var/log/kibana</value>
-    <description>Log directory for Kibana</description>
-  </property>
-  <property require-input="true">
-    <name>kibana_pid_dir</name>
-    <value>/var/run/kibana</value>
-    <description>PID directory for Kibana</description>
-  </property>
-  <property require-input="true">
-    <name>kibana_es_url</name>
-    <value></value>
-    <description>The Elasticsearch instance to use for all your queries. (http://eshost:9200)</description>
-  </property>
-  <property require-input="true">
-    <name>kibana_server_port</name>
-    <value>5000</value>
-    <description>Kibana back end server port to use.</description>
-  </property>
-  <property require-input="true">
-    <name>kibana_default_application</name>
-    <value>default</value>
-    <description>The default application to load.</description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/configuration/kibana-site.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/configuration/kibana-site.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/configuration/kibana-site.xml
deleted file mode 100755
index d8d0513..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/configuration/kibana-site.xml
+++ /dev/null
@@ -1,113 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-    <!-- kibana.yml -->
-    <property>
-        <name>content</name>
-        <display-name>kibana.yml template</display-name>
-        <description>This is the jinja template for kibana.yml file</description>
-        <value>
-# Kibana is served by a back end server. This controls which port to use.
-server.port: {{ kibana_port }}
-
-# The host to bind the server to.
-# Kibana (like Elasticsearch) now binds to localhost for security purposes instead of 0.0.0.0 (all addresses). Previous binding to 0.0.0.0 also caused issues for Windows users.
-server.host: {{ kibana_server_host }}
-
-# If you are running kibana behind a proxy, and want to mount it at a path,
-# specify that path here. The basePath can't end in a slash.
-# server.basePath: ""
-
-# The maximum payload size in bytes on incoming server requests.
-# server.maxPayloadBytes: 1048576
-
-# The Elasticsearch instance to use for all your queries.
-elasticsearch.url: {{ es_url }}
-
-# preserve_elasticsearch_host true will send the hostname specified in `elasticsearch`. If you set it to false,
-# then the host you use to connect to *this* Kibana instance will be sent.
-# elasticsearch.preserveHost: true
-
-# Kibana uses an index in Elasticsearch to store saved searches, visualizations
-# and dashboards. It will create a new index if it doesn't already exist.
-# kibana.index: ".kibana"
-
-# The default application to load.
-kibana.defaultAppId: "{{ kibana_default_application }}"
-
-# If your Elasticsearch is protected with basic auth, these are the user credentials
-# used by the Kibana server to perform maintenance on the kibana_index at startup. Your Kibana
-# users will still need to authenticate with Elasticsearch (which is proxied through
-# the Kibana server)
-# elasticsearch.username: "user"
-# elasticsearch.password: "pass"
-
-# SSL for outgoing requests from the Kibana Server to the browser (PEM formatted)
-# server.ssl.cert: /path/to/your/server.crt
-# server.ssl.key: /path/to/your/server.key
-
-# Optional setting to validate that your Elasticsearch backend uses the same key files (PEM formatted)
-# elasticsearch.ssl.cert: /path/to/your/client.crt
-# elasticsearch.ssl.key: /path/to/your/client.key
-
-# If you need to provide a CA certificate for your Elasticsearch instance, put
-# the path of the pem file here.
-# elasticsearch.ssl.ca: /path/to/your/CA.pem
-
-# Set to false to have a complete disregard for the validity of the SSL
-# certificate.
-# elasticsearch.ssl.verify: true
-
-# Time in milliseconds to wait for elasticsearch to respond to pings, defaults to
-# request_timeout setting
-# elasticsearch.pingTimeout: 1500
-
-# Time in milliseconds to wait for responses from the back end or elasticsearch.
-# This must be > 0
-# elasticsearch.requestTimeout: 30000
-
-# Time in milliseconds for Elasticsearch to wait for responses from shards.
-# Set to 0 to disable.
-# elasticsearch.shardTimeout: 0
-
-# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying
-# elasticsearch.startupTimeout: 5000
-
-# Set the path to where you would like the process id file to be created.
-# pid.file: /var/run/kibana.pid
-
-# If you would like to send the log output to a file you can set the path below.
-logging.dest: {{ log_dir }}/kibana.log
-
-# Set this to true to suppress all logging output.
-# logging.silent: false
-
-# Set this to true to suppress all logging output except for error messages.
-# logging.quiet: false
-
-# Set this to true to log all events, including system usage information and all requests.
-# logging.verbose: false
-        </value>
-        <value-attributes>
-            <type>content</type>
-        </value-attributes>
-    </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/metainfo.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/metainfo.xml
deleted file mode 100755
index 06b61a1..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/metainfo.xml
+++ /dev/null
@@ -1,94 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <schemaVersion>2.0</schemaVersion>
-    <services>
-        <service>
-            <name>KIBANA</name>
-            <displayName>Kibana</displayName>
-            <comment>Kibana Dashboard</comment>
-            <version>5.6.2</version>
-            <components>
-                <component>
-                    <name>KIBANA_MASTER</name>
-                    <displayName>Kibana Server</displayName>
-                    <category>MASTER</category>
-                    <cardinality>1</cardinality>
-                    <commandScript>
-                        <script>scripts/kibana_master.py</script>
-                        <scriptType>PYTHON</scriptType>
-                        <timeout>600</timeout>
-                    </commandScript>
-                    <customCommands>
-                        <customCommand>
-                            <name>LOAD_TEMPLATE</name>
-                            <background>false</background>
-                            <commandScript>
-                                <script>scripts/kibana_master.py</script>
-                                <scriptType>PYTHON</scriptType>
-                            </commandScript>
-                        </customCommand>
-                    </customCommands>
-                </component>
-            </components>
-            <osSpecifics>
-                <osSpecific>
-                    <osFamily>redhat6</osFamily>
-                    <packages>
-                        <package>
-                            <name>python-elasticsearch</name>
-                        </package>
-                        <package>
-                            <name>kibana-5.6.2</name>
-                        </package>
-                    </packages>
-                </osSpecific>
-                <osSpecific>
-                    <osFamily>redhat7</osFamily>
-                    <packages>
-                        <package>
-                            <name>python-elasticsearch</name>
-                        </package>
-                        <package>
-                            <name>kibana-5.6.2</name>
-                        </package>
-                    </packages>
-                </osSpecific>
-                <osSpecific>
-                    <osFamily>ubuntu14</osFamily>
-                    <packages>
-                        <package>
-                            <name>kibana=5.6.2</name>
-                        </package>
-                    </packages>
-                </osSpecific>
-            </osSpecifics>
-            <configuration-dependencies>
-                <config-type>kibana-env</config-type>
-                <config-type>kibana-site</config-type>
-            </configuration-dependencies>
-            <restartRequiredAfterChange>true</restartRequiredAfterChange>
-            <quickLinksConfigurations>
-                <quickLinksConfiguration>
-                    <fileName>quicklinks.json</fileName>
-                    <default>true</default>
-                </quickLinksConfiguration>
-            </quickLinksConfigurations>
-        </service>
-    </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/common.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/common.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/common.py
deleted file mode 100644
index 37100cd..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/common.py
+++ /dev/null
@@ -1,56 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.core.logger import Logger
-from resource_management.libraries.functions.get_user_call_output import get_user_call_output
-from resource_management.core.exceptions import ExecutionFailed
-from resource_management.core.exceptions import ComponentIsNotRunning
-
-def service_check(cmd, user, label):
-    """
-    Executes a service check command that adheres to LSB-compliant
-    return codes.  The return codes are interpreted as defined
-    by the LSB.
-
-    See http://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/iniscrptact.html
-    for more information.
-
-    :param cmd: The service check command to execute.
-    :param label: The name of the service.
-    """
-    Logger.info("Performing service check; cmd={0}, user={1}, label={2}".format(cmd, user, label))
-    rc, out, err = get_user_call_output(cmd, user, is_checked_call=False)
-
-    if len(err) > 0:
-      Logger.error(err)
-
-    if rc in [1, 2, 3]:
-      # if return code in [1, 2, 3], then 'program is not running' or 'program is dead'
-      Logger.info("{0} is not running".format(label))
-      raise ComponentIsNotRunning()
-
-    elif rc == 0:
-      # if return code = 0, then 'program is running or service is OK'
-      Logger.info("{0} is running".format(label))
-
-    else:
-      # else service state is unknown
-      err_msg = "{0} service check failed; cmd '{1}' returned {2}".format(label, cmd, rc)
-      Logger.error(err_msg)
-      raise ExecutionFailed(err_msg, rc, out, err)

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/dashboard/__init__.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/dashboard/__init__.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/dashboard/__init__.py
deleted file mode 100755
index 8d2bad8..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/dashboard/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-#  Licensed to the Apache Software Foundation (ASF) under one or more
-#  contributor license agreements.  See the NOTICE file distributed with
-#  this work for additional information regarding copyright ownership.
-#  The ASF licenses this file to You under the Apache License, Version 2.0
-#  (the "License"); you may not use this file except in compliance with
-#  the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#
\ No newline at end of file


[45/50] [abbrv] metron git commit: METRON-1511 Unable to Serialize Profiler Configuration (nickwallen) closes apache/metron#982

Posted by rm...@apache.org.
METRON-1511 Unable to Serialize Profiler Configuration (nickwallen) closes apache/metron#982


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/b5bf9a98
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/b5bf9a98
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/b5bf9a98

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: b5bf9a98725f866a7fee6470a8e763d17cc69ffd
Parents: a41611b
Author: nickwallen <ni...@nickallen.org>
Authored: Mon Apr 23 09:36:06 2018 -0400
Committer: nickallen <ni...@apache.org>
Committed: Mon Apr 23 09:36:06 2018 -0400

----------------------------------------------------------------------
 .../configuration/profiler/ProfileConfig.java   |  57 ++++++++--
 .../profiler/ProfileResultExpressions.java      |   4 +-
 .../profiler/ProfileTriageExpressions.java      |   8 ++
 .../configuration/profiler/ProfilerConfig.java  |  81 ++++++++++++--
 .../profiler/ProfileConfigTest.java             | 102 ++++++++++++++---
 .../profiler/ProfilerConfigTest.java            | 109 +++++++++++++++++--
 6 files changed, 310 insertions(+), 51 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/b5bf9a98/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileConfig.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileConfig.java b/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileConfig.java
index f5b46e6..f2272c3 100644
--- a/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileConfig.java
+++ b/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileConfig.java
@@ -18,12 +18,15 @@
 package org.apache.metron.common.configuration.profiler;
 
 import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.core.JsonProcessingException;
 import org.apache.commons.lang.builder.EqualsBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang.builder.ToStringBuilder;
+import org.apache.metron.common.utils.JSONUtils;
 
+import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -264,15 +267,47 @@ public class ProfileConfig implements Serializable {
 
   @Override
   public String toString() {
-    return "ProfileConfig{" +
-            "profile='" + profile + '\'' +
-            ", foreach='" + foreach + '\'' +
-            ", onlyif='" + onlyif + '\'' +
-            ", init=" + init +
-            ", update=" + update +
-            ", groupBy=" + groupBy +
-            ", result=" + result +
-            ", expires=" + expires +
-            '}';
+    return new ToStringBuilder(this)
+            .append("profile", profile)
+            .append("foreach", foreach)
+            .append("onlyif", onlyif)
+            .append("init", init)
+            .append("update", update)
+            .append("groupBy", groupBy)
+            .append("result", result)
+            .append("expires", expires)
+            .toString();
+  }
+
+  /**
+   * Deserialize a {@link ProfileConfig}.
+   *
+   * @param bytes Raw bytes containing a UTF-8 JSON String.
+   * @return The Profile definition.
+   * @throws IOException
+   */
+  public static ProfileConfig fromBytes(byte[] bytes) throws IOException {
+    return JSONUtils.INSTANCE.load(new String(bytes), ProfileConfig.class);
+  }
+
+  /**
+   * Deserialize a {@link ProfileConfig}.
+   *
+   * @param json A String containing JSON.
+   * @return The Profile definition.
+   * @throws IOException
+   */
+  public static ProfileConfig fromJSON(String json) throws IOException {
+    return JSONUtils.INSTANCE.load(json, ProfileConfig.class);
+  }
+
+  /**
+   * Serialize the profile definition to a JSON string.
+   *
+   * @return The Profiler configuration serialized as a JSON string.
+   * @throws JsonProcessingException
+   */
+  public String toJSON() throws JsonProcessingException {
+    return JSONUtils.INSTANCE.toJSON(this, true);
   }
 }
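
As a side note for readers of this digest: a minimal round-trip sketch
using the new fromJSON/toJSON helpers might look like the following.
The profile JSON here is illustrative only, not taken from the commit.

    import org.apache.metron.common.configuration.profiler.ProfileConfig;

    public class ProfileConfigRoundTrip {
      public static void main(String[] args) throws Exception {
        // a minimal, hypothetical profile definition
        String json = "{ \"profile\": \"example\", "
                    + "  \"foreach\": \"ip_src_addr\", "
                    + "  \"result\":  \"count\" }";

        // deserialize with the new static helper ...
        ProfileConfig config = ProfileConfig.fromJSON(json);

        // ... serialize back out, then validate the round trip
        ProfileConfig roundTripped = ProfileConfig.fromJSON(config.toJSON());
        System.out.println(config.equals(roundTripped));  // expected: true
      }
    }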

http://git-wip-us.apache.org/repos/asf/metron/blob/b5bf9a98/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileResultExpressions.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileResultExpressions.java b/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileResultExpressions.java
index 82af223..5bcec72 100644
--- a/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileResultExpressions.java
+++ b/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileResultExpressions.java
@@ -18,7 +18,7 @@
 package org.apache.metron.common.configuration.profiler;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonValue;
 
 /**
  * A Stellar expression that is executed to produce a single
@@ -26,7 +26,6 @@ import com.fasterxml.jackson.annotation.JsonIgnore;
  */
 public class ProfileResultExpressions {
 
-  @JsonIgnore
   private String expression;
 
   @JsonCreator
@@ -34,6 +33,7 @@ public class ProfileResultExpressions {
     this.expression = expression;
   }
 
+  @JsonValue
   public String getExpression() {
     return expression;
   }
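
The practical effect of swapping @JsonIgnore for @JsonValue is that the
expression now serializes as a bare JSON string instead of an object
wrapper. A self-contained sketch of that Jackson pattern follows; the
class name and values are illustrative, not the commit's.

    import com.fasterxml.jackson.annotation.JsonCreator;
    import com.fasterxml.jackson.annotation.JsonValue;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class JsonValueDemo {
      // stand-in for a single-expression holder like ProfileResultExpressions
      static class Expr {
        private final String expression;

        @JsonCreator
        public Expr(String expression) { this.expression = expression; }

        @JsonValue
        public String getExpression() { return expression; }
      }

      public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // prints "2 + 2", not {"expression":"2 + 2"}
        System.out.println(mapper.writeValueAsString(new Expr("2 + 2")));
        // @JsonCreator handles the reverse direction from a bare string
        Expr e = mapper.readValue("\"2 + 2\"", Expr.class);
        System.out.println(e.getExpression());
      }
    }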

http://git-wip-us.apache.org/repos/asf/metron/blob/b5bf9a98/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileTriageExpressions.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileTriageExpressions.java b/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileTriageExpressions.java
index fbe1706..da02cb2 100644
--- a/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileTriageExpressions.java
+++ b/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileTriageExpressions.java
@@ -17,6 +17,8 @@
  */
 package org.apache.metron.common.configuration.profiler;
 
+import com.fasterxml.jackson.annotation.JsonAnyGetter;
+import com.fasterxml.jackson.annotation.JsonAnySetter;
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonIgnore;
 
@@ -61,10 +63,16 @@ public class ProfileTriageExpressions {
     return expressions.get(name);
   }
 
+  @JsonAnyGetter
   public Map<String, String> getExpressions() {
     return expressions;
   }
 
+  @JsonAnySetter
+  public void setExpressions(Map<String, String> expressions) {
+    this.expressions = expressions;
+  }
+
   @Override
   public String toString() {
     return "ProfileTriageExpressions{" +

http://git-wip-us.apache.org/repos/asf/metron/blob/b5bf9a98/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfilerConfig.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfilerConfig.java b/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfilerConfig.java
index 0bdb7e2..e4fa99a 100644
--- a/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfilerConfig.java
+++ b/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfilerConfig.java
@@ -17,6 +17,17 @@
  */
 package org.apache.metron.common.configuration.profiler;
 
+import com.fasterxml.jackson.annotation.JsonGetter;
+import com.fasterxml.jackson.annotation.JsonSetter;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.ToStringBuilder;
+import org.apache.metron.common.utils.JSONUtils;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+import org.codehaus.jackson.map.annotate.JsonSerialize.Inclusion;
+
+import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.List;
@@ -25,6 +36,7 @@ import java.util.Optional;
 /**
  * The configuration object for the Profiler, which may contain many Profile definitions.
  */
+@JsonSerialize(include=Inclusion.NON_NULL)
 public class ProfilerConfig implements Serializable {
 
   /**
@@ -59,10 +71,16 @@ public class ProfilerConfig implements Serializable {
     return this;
   }
 
+  @JsonGetter("timestampField")
+  public String getTimestampFieldForJson() {
+    return timestampField.orElse(null);
+  }
+
   public Optional<String> getTimestampField() {
     return timestampField;
   }
 
+  @JsonSetter("timestampField")
   public void setTimestampField(String timestampField) {
     this.timestampField = Optional.of(timestampField);
   }
@@ -78,25 +96,66 @@ public class ProfilerConfig implements Serializable {
 
   @Override
   public String toString() {
-    return "ProfilerConfig{" +
-            "profiles=" + profiles +
-            ", timestampField='" + timestampField + '\'' +
-            '}';
+    return new ToStringBuilder(this)
+            .append("profiles", profiles)
+            .append("timestampField", timestampField)
+            .toString();
   }
 
   @Override
   public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
+    if (this == o) {
+      return true;
+    }
+
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
     ProfilerConfig that = (ProfilerConfig) o;
-    if (profiles != null ? !profiles.equals(that.profiles) : that.profiles != null) return false;
-    return timestampField != null ? timestampField.equals(that.timestampField) : that.timestampField == null;
+    return new EqualsBuilder()
+            .append(profiles, that.profiles)
+            .append(timestampField, that.timestampField)
+            .isEquals();
   }
 
   @Override
   public int hashCode() {
-    int result = profiles != null ? profiles.hashCode() : 0;
-    result = 31 * result + (timestampField != null ? timestampField.hashCode() : 0);
-    return result;
+    return new HashCodeBuilder(17, 37)
+            .append(profiles)
+            .append(timestampField)
+            .toHashCode();
+  }
+
+  /**
+   * Deserialize a {@link ProfilerConfig}.
+   *
+   * @param bytes Raw bytes containing a UTF-8 JSON String.
+   * @return The Profiler configuration.
+   * @throws IOException
+   */
+  public static ProfilerConfig fromBytes(byte[] bytes) throws IOException {
+    return JSONUtils.INSTANCE.load(new String(bytes), ProfilerConfig.class);
+  }
+
+  /**
+   * Deserialize a {@link ProfilerConfig}.
+   *
+   * @param json A String containing JSON.
+   * @return The Profiler configuration.
+   * @throws IOException
+   */
+  public static ProfilerConfig fromJSON(String json) throws IOException {
+    return JSONUtils.INSTANCE.load(json, ProfilerConfig.class);
+  }
+
+  /**
+   * Serialize a {@link ProfilerConfig} to a JSON string.
+   *
+   * @return The Profiler configuration serialized as a JSON string.
+   * @throws JsonProcessingException
+   */
+  public String toJSON() throws JsonProcessingException {
+    return JSONUtils.INSTANCE.toJSON(this, true);
   }
 }
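
The @JsonGetter/@JsonSetter pair on timestampField, combined with the
non-null inclusion on the class, is what lets the Optional-valued field
round-trip: when the Optional is empty the getter returns null, and the
field should be omitted from the JSON entirely rather than written as
null. A minimal sketch using the new helpers (the profiler JSON is
illustrative only):

    import org.apache.metron.common.configuration.profiler.ProfilerConfig;

    public class TimestampFieldDemo {
      public static void main(String[] args) throws Exception {
        String json = "{ \"profiles\": [ { \"profile\": \"p1\", "
                    + "    \"foreach\": \"ip_src_addr\", "
                    + "    \"result\":  \"count\" } ] }";

        ProfilerConfig conf = ProfilerConfig.fromJSON(json);

        // no timestampField was supplied, so the Optional is empty ...
        System.out.println(conf.getTimestampField().isPresent());  // false

        // ... and the serialized form should omit the field rather than
        // emit "timestampField": null
        System.out.println(conf.toJSON());
      }
    }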

http://git-wip-us.apache.org/repos/asf/metron/blob/b5bf9a98/metron-platform/metron-common/src/test/java/org/apache/metron/common/configuration/profiler/ProfileConfigTest.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/test/java/org/apache/metron/common/configuration/profiler/ProfileConfigTest.java b/metron-platform/metron-common/src/test/java/org/apache/metron/common/configuration/profiler/ProfileConfigTest.java
index e178ee0..87dbbc4 100644
--- a/metron-platform/metron-common/src/test/java/org/apache/metron/common/configuration/profiler/ProfileConfigTest.java
+++ b/metron-platform/metron-common/src/test/java/org/apache/metron/common/configuration/profiler/ProfileConfigTest.java
@@ -21,7 +21,6 @@ package org.apache.metron.common.configuration.profiler;
 
 import com.fasterxml.jackson.databind.JsonMappingException;
 import org.adrianwalker.multilinestring.Multiline;
-import org.apache.metron.common.utils.JSONUtils;
 import org.junit.Test;
 
 import java.io.IOException;
@@ -51,12 +50,29 @@ public class ProfileConfigTest {
    * The 'onlyif' field should default to 'true' when it is not specified.
    */
   @Test
-  public void testOnlyIfDefault() throws IOException {
-    ProfileConfig profile = JSONUtils.INSTANCE.load(onlyIfDefault, ProfileConfig.class);
+  public void testFromJSONWithOnlyIfDefault() throws IOException {
+    ProfileConfig profile = ProfileConfig.fromJSON(onlyIfDefault);
     assertEquals("true", profile.getOnlyif());
   }
 
   /**
+   * Tests serializing the Profiler configuration to JSON.
+   */
+  @Test
+  public void testToJSONWithOnlyIfDefault() throws Exception {
+
+    // setup a profiler config to serialize
+    ProfileConfig expected = ProfileConfig.fromJSON(onlyIfDefault);
+
+    // execute the test - serialize the config
+    String asJson = expected.toJSON();
+
+    // validate - deserialize to validate
+    ProfileConfig actual = ProfileConfig.fromJSON(asJson);
+    assertEquals(expected, actual);
+  }
+
+  /**
    * {
    *    "foreach": "ip_src_addr",
    *    "update": {},
@@ -70,8 +86,8 @@ public class ProfileConfigTest {
    * The 'name' of the profile must be defined.
    */
   @Test(expected = JsonMappingException.class)
-  public void testNameMissing() throws IOException {
-    JSONUtils.INSTANCE.load(nameMissing, ProfileConfig.class);
+  public void testFromJSONWithNameMissing() throws IOException {
+    ProfileConfig.fromJSON(nameMissing);
   }
 
   /**
@@ -88,8 +104,8 @@ public class ProfileConfigTest {
    * The 'foreach' field must be defined.
    */
   @Test(expected = JsonMappingException.class)
-  public void testForeachMissing() throws IOException {
-    JSONUtils.INSTANCE.load(foreachMissing, ProfileConfig.class);
+  public void testFromJSONWithForeachMissing() throws IOException {
+    ProfileConfig.fromJSON(foreachMissing);
   }
 
   /**
@@ -106,8 +122,8 @@ public class ProfileConfigTest {
    * The 'result' field must be defined.
    */
   @Test(expected = JsonMappingException.class)
-  public void testResultMissing() throws IOException {
-    JSONUtils.INSTANCE.load(resultMissing, ProfileConfig.class);
+  public void testFromJSONWithResultMissing() throws IOException {
+    ProfileConfig.fromJSON(resultMissing);
   }
 
   /**
@@ -125,8 +141,8 @@ public class ProfileConfigTest {
    * The 'result' field must contain the 'profile' expression used to store the profile measurement.
    */
   @Test(expected = JsonMappingException.class)
-  public void testResultMissingProfileExpression() throws IOException {
-    JSONUtils.INSTANCE.load(resultMissingProfileExpression, ProfileConfig.class);
+  public void testFromJSONWithResultMissingProfileExpression() throws IOException {
+    ProfileConfig.fromJSON(resultMissingProfileExpression);
   }
 
   /**
@@ -145,8 +161,8 @@ public class ProfileConfigTest {
    * the 'profile' expression used to store the profile measurement.
    */
   @Test
-  public void testResultWithExpression() throws IOException {
-    ProfileConfig profile = JSONUtils.INSTANCE.load(resultWithExpression, ProfileConfig.class);
+  public void testFromJSONWithResultWithExpression() throws IOException {
+    ProfileConfig profile = ProfileConfig.fromJSON(resultWithExpression);
     assertEquals("2 + 2", profile.getResult().getProfileExpressions().getExpression());
 
     // no triage expressions expected
@@ -154,6 +170,23 @@ public class ProfileConfigTest {
   }
 
   /**
+   * Tests serializing the Profiler configuration to JSON.
+   */
+  @Test
+  public void testToJSONWithResultWithExpression() throws Exception {
+
+    // setup a profiler config to serialize
+    ProfileConfig expected = ProfileConfig.fromJSON(resultWithExpression);
+
+    // execute the test - serialize the config
+    String asJson = expected.toJSON();
+
+    // validate - deserialize to validate
+    ProfileConfig actual = ProfileConfig.fromJSON(asJson);
+    assertEquals(expected, actual);
+  }
+
+  /**
    * {
    *    "profile": "test",
    *    "foreach": "ip_src_addr",
@@ -170,8 +203,8 @@ public class ProfileConfigTest {
    * The result's 'triage' field is optional.
    */
   @Test
-  public void testResultWithProfileOnly() throws IOException {
-    ProfileConfig profile = JSONUtils.INSTANCE.load(resultWithProfileOnly, ProfileConfig.class);
+  public void testFromJSONWithResultWithProfileOnly() throws IOException {
+    ProfileConfig profile = ProfileConfig.fromJSON(resultWithProfileOnly);
     assertEquals("2 + 2", profile.getResult().getProfileExpressions().getExpression());
 
     // no triage expressions expected
@@ -179,6 +212,23 @@ public class ProfileConfigTest {
   }
 
   /**
+   * Tests serializing the Profiler configuration to JSON.
+   */
+  @Test
+  public void testToJSONWithProfileOnly() throws Exception {
+
+    // setup a profiler config to serialize
+    ProfileConfig expected = ProfileConfig.fromJSON(resultWithProfileOnly);
+
+    // execute the test - serialize the config
+    String asJson = expected.toJSON();
+
+    // validate - deserialize to validate
+    ProfileConfig actual = ProfileConfig.fromJSON(asJson);
+    assertEquals(expected, actual);
+  }
+
+  /**
    * {
    *    "profile": "test",
    *    "foreach": "ip_src_addr",
@@ -199,10 +249,28 @@ public class ProfileConfigTest {
    * The result's 'triage' field can contain many named expressions.
    */
   @Test
-  public void testResultWithTriage() throws IOException {
-    ProfileConfig profile = JSONUtils.INSTANCE.load(resultWithTriage, ProfileConfig.class);
+  public void testFromJSONWithResultWithTriage() throws IOException {
+    ProfileConfig profile = ProfileConfig.fromJSON(resultWithTriage);
 
     assertEquals("4 + 4", profile.getResult().getTriageExpressions().getExpression("eight"));
     assertEquals("8 + 8", profile.getResult().getTriageExpressions().getExpression("sixteen"));
   }
+
+  /**
+   * Tests serializing the Profiler configuration to JSON.
+   */
+  @Test
+  public void testToJSONWithResultWithTriage() throws Exception {
+
+    // setup a profiler config to serialize
+    ProfileConfig expected = ProfileConfig.fromJSON(resultWithTriage);
+
+    // execute the test - serialize the config
+    String asJson = expected.toJSON();
+
+    // validate - deserialize to validate
+    ProfileConfig actual = ProfileConfig.fromJSON(asJson);
+    assertEquals(expected, actual);
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/metron/blob/b5bf9a98/metron-platform/metron-common/src/test/java/org/apache/metron/common/configuration/profiler/ProfilerConfigTest.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/test/java/org/apache/metron/common/configuration/profiler/ProfilerConfigTest.java b/metron-platform/metron-common/src/test/java/org/apache/metron/common/configuration/profiler/ProfilerConfigTest.java
index 2e73cde..1a11811 100644
--- a/metron-platform/metron-common/src/test/java/org/apache/metron/common/configuration/profiler/ProfilerConfigTest.java
+++ b/metron-platform/metron-common/src/test/java/org/apache/metron/common/configuration/profiler/ProfilerConfigTest.java
@@ -20,7 +20,6 @@
 package org.apache.metron.common.configuration.profiler;
 
 import org.adrianwalker.multilinestring.Multiline;
-import org.apache.metron.common.utils.JSONUtils;
 import org.junit.Test;
 
 import java.io.IOException;
@@ -48,14 +47,41 @@ public class ProfilerConfigTest {
    * }
    */
   @Multiline
+  private String profile;
+
+  /**
+   * Tests deserializing the Profiler configuration using the fromJSON(...) method.
+   */
+  @Test
+  public void testFromJSON() throws IOException {
+    ProfilerConfig conf = ProfilerConfig.fromJSON(profile);
+
+    assertFalse(conf.getTimestampField().isPresent());
+    assertEquals(1, conf.getProfiles().size());
+  }
+
+  /**
+   * {
+   *   "profiles": [
+   *      {
+   *        "profile": "profile1",
+   *        "foreach": "ip_src_addr",
+   *        "init":   { "count": "0" },
+   *        "update": { "count": "count + 1" },
+   *        "result":   "count"
+   *      }
+   *   ]
+   * }
+   */
+  @Multiline
   private String noTimestampField;
 
   /**
    * If no 'timestampField' is defined, it should not be present by default.
    */
   @Test
-  public void testNoTimestampField() throws IOException {
-    ProfilerConfig conf = JSONUtils.INSTANCE.load(noTimestampField, ProfilerConfig.class);
+  public void testFromJSONWithNoTimestampField() throws IOException {
+    ProfilerConfig conf = ProfilerConfig.fromJSON(noTimestampField);
     assertFalse(conf.getTimestampField().isPresent());
   }
 
@@ -77,11 +103,12 @@ public class ProfilerConfigTest {
   private String timestampField;
 
   /**
-   * If no 'timestampField' is defined, it should not be present by default.
+   * Tests deserializing the Profiler configuration when the timestamp field is defined.
    */
   @Test
-  public void testTimestampField() throws IOException {
-    ProfilerConfig conf = JSONUtils.INSTANCE.load(timestampField, ProfilerConfig.class);
+  public void testFromJSONWithTimestampField() throws IOException {
+    ProfilerConfig conf = ProfilerConfig.fromJSON(timestampField);
+
     assertTrue(conf.getTimestampField().isPresent());
   }
 
@@ -108,13 +135,75 @@ public class ProfilerConfigTest {
   @Multiline
   private String twoProfiles;
 
+  @Test
+  public void testFromJSONTwoProfiles() throws IOException {
+    ProfilerConfig conf = ProfilerConfig.fromJSON(twoProfiles);
+
+    assertEquals(2, conf.getProfiles().size());
+    assertFalse(conf.getTimestampField().isPresent());
+  }
+
   /**
-   * The 'onlyif' field should default to 'true' when it is not specified.
+   * Tests serializing the Profiler configuration to JSON.
    */
   @Test
-  public void testTwoProfiles() throws IOException {
-    ProfilerConfig conf = JSONUtils.INSTANCE.load(twoProfiles, ProfilerConfig.class);
-    assertEquals(2, conf.getProfiles().size());
+  public void testToJSON() throws Exception {
+
+    // setup a profiler config to serialize
+    ProfilerConfig expected = ProfilerConfig.fromJSON(profile);
+
+    // execute the test - serialize the config
+    String asJson = expected.toJSON();
+
+    // validate - deserialize to validate
+    ProfilerConfig actual = ProfilerConfig.fromJSON(asJson);
+    assertEquals(expected, actual);
   }
 
+  /**
+   * {
+   *   "profiles": [
+   *      {
+   *        "profile": "profile1",
+   *        "foreach": "ip_src_addr",
+   *        "init":   { "count": "0" },
+   *        "update": { "count": "count + 1" },
+   *        "result": {
+   *          "profile": "count",
+   *          "triage" : { "count": "count" }
+   *        }
+   *      }
+   *   ]
+   * }
+   */
+  @Multiline
+  private String profileWithTriageExpression;
+
+  @Test
+  public void testToJSONWithTriageExpression() throws Exception {
+
+    // setup a profiler config to serialize
+    ProfilerConfig expected = ProfilerConfig.fromJSON(profileWithTriageExpression);
+
+    // execute the test - serialize the config
+    String asJson = expected.toJSON();
+
+    // validate - deserialize to validate
+    ProfilerConfig actual = ProfilerConfig.fromJSON(asJson);
+    assertEquals(expected, actual);
+  }
+
+  @Test
+  public void testToJSONWithTwoProfiles() throws Exception {
+
+    // setup a profiler config to serialize
+    ProfilerConfig expected = ProfilerConfig.fromJSON(twoProfiles);
+
+    // execute the test - serialize the config
+    String asJson = expected.toJSON();
+
+    // validate - deserialize to validate
+    ProfilerConfig actual = ProfilerConfig.fromJSON(asJson);
+    assertEquals(expected, actual);
+  }
 }


[30/50] [abbrv] metron git commit: METRON-1510 Update Metron website to include info about github update subscription (anandsubbu) closes apache/metron#981

Posted by rm...@apache.org.
METRON-1510 Update Metron website to include info about github update subscription (anandsubbu) closes apache/metron#981


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/438893b7
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/438893b7
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/438893b7

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 438893b78c852e34d7edb965840500a78503d299
Parents: ed50d48
Author: anandsubbu <an...@gmail.com>
Authored: Wed Apr 11 11:03:37 2018 +0530
Committer: anandsubbu <an...@apache.org>
Committed: Wed Apr 11 11:03:37 2018 +0530

----------------------------------------------------------------------
 site/community/index.md | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/438893b7/site/community/index.md
----------------------------------------------------------------------
diff --git a/site/community/index.md b/site/community/index.md
index 7c09b14..b222ba3 100644
--- a/site/community/index.md
+++ b/site/community/index.md
@@ -121,11 +121,15 @@ title: Apache Metron Community
             <h4> General & Public Discussion </h4>
                 [<a href="mailto:user-subscribe@metron.apache.org">Subscribe</a>]
                 [<a href="mailto:user-unsubscribe@metron.apache.org">Unsubscribe</a>]
-                [<a href="http://mail-archives.apache.org/mod_mbox/metron-user/">Archives</a>]
+                [<a href="https://lists.apache.org/list.html?user@metron.apache.org">Archives</a>]
             <h4> Code & Documentation Change </h4>
                 [<a href="mailto:dev-subscribe@metron.apache.org">Subscribe</a>]
                 [<a href="mailto:dev-unsubscribe@metron.apache.org">Unsubscribe</a>]
-                [<a href="http://mail-archives.apache.org/mod_mbox/metron-dev/">Archives</a>]
+                [<a href="https://lists.apache.org/list.html?dev@metron.apache.org">Archives</a>]
+            <h4> Issues & Github updates </h4>
+                [<a href="mailto:issues-subscribe@metron.apache.org">Subscribe</a>]
+                [<a href="mailto:issues-unsubscribe@metron.apache.org">Unsubscribe</a>]
+                [<a href="https://lists.apache.org/list.html?issues@metron.apache.org">Archives</a>]
         </div>
 </section>
 


[17/50] [abbrv] metron git commit: METRON-1501 Parser messages that fail to validate are dropped silently (cestella via justinleet) closes apache/metron#972

Posted by rm...@apache.org.
METRON-1501 Parser messages that fail to validate are dropped silently (cestella via justinleet) closes apache/metron#972


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/0d847cf5
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/0d847cf5
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/0d847cf5

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 0d847cf5f91dc0d3b3b6838eb4b4de4aa2cf2fec
Parents: 19b237d
Author: cestella <ce...@gmail.com>
Authored: Tue Apr 3 10:29:19 2018 -0400
Committer: leet <le...@apache.org>
Committed: Tue Apr 3 10:29:19 2018 -0400

----------------------------------------------------------------------
 metron-platform/metron-parsers/README.md        | 29 +++++++++++++++++++-
 .../apache/metron/parsers/bolt/ParserBolt.java  | 17 ++++++++----
 .../metron/parsers/bolt/ParserBoltTest.java     | 16 ++++++++---
 3 files changed, 51 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/0d847cf5/metron-platform/metron-parsers/README.md
----------------------------------------------------------------------
diff --git a/metron-platform/metron-parsers/README.md b/metron-platform/metron-parsers/README.md
index 3d9fdfe..6b9d62e 100644
--- a/metron-platform/metron-parsers/README.md
+++ b/metron-platform/metron-parsers/README.md
@@ -45,7 +45,34 @@ There are two general types of parsers:
       * `ERROR` : Throw an error when a multidimensional map is encountered
     * `jsonpQuery` : A [JSON Path](#json_path) query string. If present, the result of the JSON Path query should be a list of messages. This is useful if you have a JSON document which contains a list or array of messages embedded in it, and you do not have another means of splitting the message.
     * A field called `timestamp` is expected to exist and, if it does not, then current time is inserted.  
-    
+
+## Parser Error Routing
+
+Currently, we have a few mechanisms for either deferring processing of
+messages or marking messages as invalid.
+
+### Invalidation Errors
+
+There are two reasons a message will be marked as invalid:
+* Fail [global validation](../metron-common#validation-framework)
+* Fail the parser's validate function (generally this means the message lacks a `timestamp` field or an `original_string` field).
+
+Messages marked as invalid are sent to the error queue, with the error
+message indicating that they are invalid.
+
+### Parser Errors
+
+Errors, defined as unexpected exceptions thrown during parsing, are sent
+to the error queue with a message indicating a parse error along with a
+stacktrace.  This distinguishes them from the invalid messages.
+ 
+### Filtered
+
+One can also filter a message by specifying a `filterClassName` in the
+parser config.  Filtered messages are just dropped rather than passed
+through.
+   
 ## Parser Architecture
 
 ![Architecture](parser_arch.png)
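
A custom filter of the kind referenced by `filterClassName` above could look roughly like the following; a minimal sketch, assuming the `MessageFilter` interface lives in `org.apache.metron.parsers.interfaces` and exposes the `emitTuple(message, context)` method that `ParserBolt` invokes in the diff below (the package, class name, and `configure` hook here are illustrative):

```java
package org.example.filters; // hypothetical package

import java.util.Map;
import org.apache.metron.parsers.interfaces.MessageFilter; // assumed location of the interface
import org.apache.metron.stellar.dsl.Context;
import org.json.simple.JSONObject;

/**
 * Drops any message that lacks an 'ip_dst_addr' field.  Wired in by setting
 * "filterClassName": "org.example.filters.RequireDestinationAddressFilter"
 * in the parser config.
 */
public class RequireDestinationAddressFilter implements MessageFilter<JSONObject> {

  public void configure(Map<String, Object> config) {
    // no configuration needed for this sketch; the real interface's
    // lifecycle hooks may differ
  }

  @Override
  public boolean emitTuple(JSONObject message, Context context) {
    // returning false drops the message rather than passing it through
    return message.containsKey("ip_dst_addr");
  }
}
```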

http://git-wip-us.apache.org/repos/asf/metron/blob/0d847cf5/metron-platform/metron-parsers/src/main/java/org/apache/metron/parsers/bolt/ParserBolt.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-parsers/src/main/java/org/apache/metron/parsers/bolt/ParserBolt.java b/metron-platform/metron-parsers/src/main/java/org/apache/metron/parsers/bolt/ParserBolt.java
index 6fc4ed7..e996f14 100644
--- a/metron-platform/metron-parsers/src/main/java/org/apache/metron/parsers/bolt/ParserBolt.java
+++ b/metron-platform/metron-parsers/src/main/java/org/apache/metron/parsers/bolt/ParserBolt.java
@@ -193,23 +193,28 @@ public class ParserBolt extends ConfiguredParserBolt implements Serializable {
             message.put(Constants.GUID, UUID.randomUUID().toString());
           }
 
-          if (parser.validate(message) && (filter == null || filter.emitTuple(message, stellarContext))) {
-            numWritten++;
-            List<FieldValidator> failedValidators = getFailedValidators(message, fieldValidations);
-            if(failedValidators.size() > 0) {
+          if (filter == null || filter.emitTuple(message, stellarContext)) {
+            boolean isInvalid = !parser.validate(message);
+            List<FieldValidator> failedValidators = null;
+            if(!isInvalid) {
+              failedValidators = getFailedValidators(message, fieldValidations);
+              isInvalid = !failedValidators.isEmpty();
+            }
+            if (isInvalid) {
               MetronError error = new MetronError()
                       .withErrorType(Constants.ErrorType.PARSER_INVALID)
                       .withSensorType(getSensorType())
                       .addRawMessage(message);
-              Set<String> errorFields = failedValidators.stream()
+              Set<String> errorFields = failedValidators == null ? null : failedValidators.stream()
                       .flatMap(fieldValidator -> fieldValidator.getInput().stream())
                       .collect(Collectors.toSet());
-              if (!errorFields.isEmpty()) {
+              if (errorFields != null && !errorFields.isEmpty()) {
                 error.withErrorFields(errorFields);
               }
               ErrorUtils.handleError(collector, error);
             }
             else {
+              numWritten++;
               writer.write(getSensorType(), tuple, message, getConfigurations(), messageGetStrategy);
             }
           }
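
Stripped of the diff markers, the reworked routing applies the filter first and only then validates; a simplified restatement of the change above, not the verbatim source:

```java
// simplified ParserBolt routing after this change
if (filter == null || filter.emitTuple(message, stellarContext)) {
  // 1. parser-level validation, e.g. the required 'timestamp' field
  boolean isInvalid = !parser.validate(message);

  // 2. global field validation, only when the parser-level check passed
  List<FieldValidator> failedValidators = null;
  if (!isInvalid) {
    failedValidators = getFailedValidators(message, fieldValidations);
    isInvalid = !failedValidators.isEmpty();
  }

  if (isInvalid) {
    // invalid messages now land on the error queue as PARSER_INVALID
    ErrorUtils.handleError(collector, error); // 'error' built as shown above
  } else {
    numWritten++;
    writer.write(getSensorType(), tuple, message, getConfigurations(), messageGetStrategy);
  }
}
// messages rejected by the filter are simply dropped
```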

http://git-wip-us.apache.org/repos/asf/metron/blob/0d847cf5/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/bolt/ParserBoltTest.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/bolt/ParserBoltTest.java b/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/bolt/ParserBoltTest.java
index 3316b32..6439b2b 100644
--- a/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/bolt/ParserBoltTest.java
+++ b/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/bolt/ParserBoltTest.java
@@ -118,6 +118,9 @@ public class ParserBoltTest extends BaseBoltTest {
   }
 
   private static ConfigurationsUpdater<ParserConfigurations> createUpdater() {
+    return createUpdater(Optional.empty());
+  }
+  private static ConfigurationsUpdater<ParserConfigurations> createUpdater(Optional<Integer> batchSize) {
     return new ConfigurationsUpdater<ParserConfigurations>(null, null) {
       @Override
       public void update(CuratorFramework client, String path, byte[] data) throws IOException { }
@@ -153,6 +156,9 @@ public class ParserBoltTest extends BaseBoltTest {
               @Override
               public Map<String, Object> getParserConfig() {
                 return new HashMap<String, Object>() {{
+                  if(batchSize.isPresent()) {
+                    put(IndexingConfigurations.BATCH_SIZE_CONF, batchSize.get());
+                  }
                 }};
               }
             };
@@ -502,9 +508,9 @@ public void testImplicitBatchOfOne() throws Exception {
     ParserBolt parserBolt = new ParserBolt("zookeeperUrl", sensorType, parser, new WriterHandler(batchWriter)) {
       @Override
       protected ConfigurationsUpdater<ParserConfigurations> createUpdater() {
-        return ParserBoltTest.createUpdater();
+        return ParserBoltTest.createUpdater(Optional.of(5));
       }
-    };
+    } ;
 
     parserBolt.setCuratorFramework(client);
     parserBolt.setZKCache(cache);
@@ -524,6 +530,7 @@ public void testImplicitBatchOfOne() throws Exception {
     writeNonBatch(outputCollector, parserBolt, t3);
     writeNonBatch(outputCollector, parserBolt, t4);
     parserBolt.execute(t5);
+    verify(batchWriter, times(1)).write(eq(sensorType), any(WriterConfiguration.class), eq(tuples), any());
     verify(outputCollector, times(1)).ack(t1);
     verify(outputCollector, times(1)).ack(t2);
     verify(outputCollector, times(1)).ack(t3);
@@ -540,7 +547,7 @@ public void testImplicitBatchOfOne() throws Exception {
     ParserBolt parserBolt = new ParserBolt("zookeeperUrl", sensorType, parser, new WriterHandler(batchWriter)) {
       @Override
       protected ConfigurationsUpdater<ParserConfigurations> createUpdater() {
-        return ParserBoltTest.createUpdater();
+        return ParserBoltTest.createUpdater(Optional.of(5));
       }
     };
 
@@ -552,7 +559,7 @@ public void testImplicitBatchOfOne() throws Exception {
 
     doThrow(new Exception()).when(batchWriter).write(any(), any(), any(), any());
     when(parser.validate(any())).thenReturn(true);
-    when(parser.parse(any())).thenReturn(ImmutableList.of(new JSONObject()));
+    when(parser.parseOptional(any())).thenReturn(Optional.of(ImmutableList.of(new JSONObject())));
     when(filter.emitTuple(any(), any(Context.class))).thenReturn(true);
     parserBolt.withMessageFilter(filter);
     parserBolt.execute(t1);
@@ -560,6 +567,7 @@ public void testImplicitBatchOfOne() throws Exception {
     parserBolt.execute(t3);
     parserBolt.execute(t4);
     parserBolt.execute(t5);
+    verify(batchWriter, times(1)).write(any(), any(), any(), any());
     verify(outputCollector, times(1)).ack(t1);
     verify(outputCollector, times(1)).ack(t2);
     verify(outputCollector, times(1)).ack(t3);


[25/50] [abbrv] metron git commit: METRON-1449 Set Zookeeper URL for Stellar Running in Zeppelin Notebook (nickwallen) closes apache/metron#931

Posted by rm...@apache.org.
METRON-1449 Set Zookeeper URL for Stellar Running in Zeppelin Notebook (nickwallen) closes apache/metron#931


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/ab4f8e65
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/ab4f8e65
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/ab4f8e65

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: ab4f8e65e13aaea3a10491290a7411b3b6dc955e
Parents: 0ab39a3
Author: nickwallen <ni...@nickallen.org>
Authored: Fri Apr 6 16:40:01 2018 -0400
Committer: nickallen <ni...@apache.org>
Committed: Fri Apr 6 16:40:01 2018 -0400

----------------------------------------------------------------------
 metron-platform/metron-management/pom.xml       |   1 -
 .../shell/DefaultStellarShellExecutor.java      |  32 +++---
 .../shell/DefaultStellarShellExecutorTest.java  |  11 ++
 metron-stellar/stellar-zeppelin/README.md       |  80 ++++----------
 metron-stellar/stellar-zeppelin/pom.xml         |  12 +++
 .../stellar/zeppelin/StellarInterpreter.java    |  95 ++++++++++-------
 .../zeppelin/StellarInterpreterProperty.java    |  79 ++++++++++++++
 .../StellarInterpreterPropertyTest.java         |  62 +++++++++++
 .../zeppelin/StellarInterpreterTest.java        |  60 ++++++++++-
 .../integration/ConfigUploadComponent.java      |  82 +++++++++++++++
 .../StellarInterpreterIntegrationTest.java      | 104 +++++++++++++++++++
 11 files changed, 501 insertions(+), 117 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/ab4f8e65/metron-platform/metron-management/pom.xml
----------------------------------------------------------------------
diff --git a/metron-platform/metron-management/pom.xml b/metron-platform/metron-management/pom.xml
index c185662..962fd46 100644
--- a/metron-platform/metron-management/pom.xml
+++ b/metron-platform/metron-management/pom.xml
@@ -183,7 +183,6 @@
             </exclusions>
         </dependency>
     </dependencies>
-
     <build>
         <plugins>
             <plugin>

http://git-wip-us.apache.org/repos/asf/metron/blob/ab4f8e65/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/shell/DefaultStellarShellExecutor.java
----------------------------------------------------------------------
diff --git a/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/shell/DefaultStellarShellExecutor.java b/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/shell/DefaultStellarShellExecutor.java
index 3f2c495..781a0cf 100644
--- a/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/shell/DefaultStellarShellExecutor.java
+++ b/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/shell/DefaultStellarShellExecutor.java
@@ -52,6 +52,7 @@ import java.io.ByteArrayInputStream;
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -271,10 +272,7 @@ public class DefaultStellarShellExecutor implements StellarShellExecutor {
       globals = (Map<String, Object>) capability.get();
 
     } else {
-      // if it does not exist, create it.  this creates the global config for the current stellar executor
-      // session only.  this does not change the global config maintained externally in zookeeper
-      globals = new HashMap<>();
-      getContext().addCapability(GLOBAL_CONFIG, () -> globals);
+      throw new IllegalStateException("'GLOBAL_CONFIG' is missing");
     }
 
     return globals;
@@ -344,21 +342,25 @@ public class DefaultStellarShellExecutor implements StellarShellExecutor {
    * @param zkClient An optional Zookeeper client.
    */
   private Context createContext(Properties properties, Optional<CuratorFramework> zkClient) throws Exception {
+    Context.Builder contextBuilder = new Context.Builder();
+    Map<String, Object> globals;
+    if (zkClient.isPresent()) {
 
-    Context.Builder contextBuilder = new Context.Builder()
-            .with(SHELL_VARIABLES, () -> variables)
-            .with(STELLAR_CONFIG, () -> properties);
+      // fetch globals from zookeeper
+      globals = fetchGlobalConfig(zkClient.get());
+      contextBuilder.with(ZOOKEEPER_CLIENT, () -> zkClient.get());
 
-    // load global configuration from zookeeper
-    if (zkClient.isPresent()) {
-      Map<String, Object> global = fetchGlobalConfig(zkClient.get());
-      contextBuilder
-              .with(GLOBAL_CONFIG, () -> global)
-              .with(ZOOKEEPER_CLIENT, () -> zkClient.get())
-              .with(STELLAR_CONFIG, () -> getStellarConfig(global, properties));
+    } else {
+
+      // use empty globals to allow a user to '%define' their own
+      globals = new HashMap<>();
     }
 
-    return contextBuilder.build();
+    return contextBuilder
+            .with(SHELL_VARIABLES, () -> variables)
+            .with(GLOBAL_CONFIG, () -> globals)
+            .with(STELLAR_CONFIG, () -> getStellarConfig(globals, properties))
+            .build();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/metron/blob/ab4f8e65/metron-stellar/stellar-common/src/test/java/org/apache/metron/stellar/common/shell/DefaultStellarShellExecutorTest.java
----------------------------------------------------------------------
diff --git a/metron-stellar/stellar-common/src/test/java/org/apache/metron/stellar/common/shell/DefaultStellarShellExecutorTest.java b/metron-stellar/stellar-common/src/test/java/org/apache/metron/stellar/common/shell/DefaultStellarShellExecutorTest.java
index ebba84f..23b0204 100644
--- a/metron-stellar/stellar-common/src/test/java/org/apache/metron/stellar/common/shell/DefaultStellarShellExecutorTest.java
+++ b/metron-stellar/stellar-common/src/test/java/org/apache/metron/stellar/common/shell/DefaultStellarShellExecutorTest.java
@@ -295,4 +295,15 @@ public class DefaultStellarShellExecutorTest {
     assertTrue(result.getValue().isPresent());
     assertEquals("", result.getValue().get());
   }
+
+  /**
+   * If the executor is initialized without a connection to Zookeeper, the globals should be
+   * defined, but empty.  This allows a user to '%define' their own with magic commands even
+   * without Zookeeper.
+   */
+  @Test
+  public void testEmptyGlobalsWithNoZookeeper() {
+    assertNotNull(executor.getGlobalConfig());
+    assertEquals(0, executor.getGlobalConfig().size());
+  }
 }

http://git-wip-us.apache.org/repos/asf/metron/blob/ab4f8e65/metron-stellar/stellar-zeppelin/README.md
----------------------------------------------------------------------
diff --git a/metron-stellar/stellar-zeppelin/README.md b/metron-stellar/stellar-zeppelin/README.md
index 1190658..60dcb3a 100644
--- a/metron-stellar/stellar-zeppelin/README.md
+++ b/metron-stellar/stellar-zeppelin/README.md
@@ -48,77 +48,27 @@ To install the Stellar Interpreter in your Apache Zeppelin installation, follow
     mvn clean install -DskipTests
     ```
 
-1. If you do not already have Zeppelin installed, [download and unpack Apache Zeppelin](https://zeppelin.apache.org/download.html).  The directory in which you unpack Zeppelin will be referred to as `$ZEPPELIN_HOME`.
-
-1. If Zeppelin was already installed, make sure that it is not running.
-
-1. Create a settings directory for the Stellar interpreter.
+1. If you do not already have Zeppelin installed, [download and unpack Apache Zeppelin](https://zeppelin.apache.org/download.html).  Then change directories to the root of your Zeppelin download.
 
     ```
-    mkdir $ZEPPELIN_HOME/interpreter/stellar
-    cat <<EOF > $ZEPPELIN_HOME/interpreter/stellar/interpreter-setting.json
-    [
-      {
-        "group": "stellar",
-        "name": "stellar",
-        "className": "org.apache.metron.stellar.zeppelin.StellarInterpreter",
-        "properties": {
-        }
-      }
-    ]
-    EOF
+    cd $ZEPPELIN_HOME
     ```
 
-1. Create a Zeppelin Site file (`$ZEPPELIN_HOME/conf/zeppelin-site.xml`).
-
-    ```
-    cp $ZEPPELIN_HOME/conf/zeppelin-site.xml.template $ZEPPELIN_HOME/conf/zeppelin-site.xml
-    ```
+1. Use Zeppelin's installation utility to install the Stellar Interpreter.
 
-1. In the Zeppelin site file, add `org.apache.metron.stellar.zeppelin.StellarInterpreter` to the comma-separated list of Zeppelin interpreters under the `zeppelin.interpreters` property.
+    If Zeppelin was already installed, make sure that it is stopped before running this command.  Update the version ('0.4.3' in the example below) to whatever is appropriate for your environment.
 
-    The property will likely look-like the following.
     ```
-    <property>
-      <name>zeppelin.interpreters</name>
-      <value>org.apache.zeppelin.spark.SparkInterpreter,org.apache.zeppelin.spark.PySparkInterpreter,org.apache.zeppelin.rinterpreter.RRepl,org.apache.zeppelin.rinterpreter.KnitR,org.apache.zeppelin.spark.SparkRInterpreter,org.apache.zeppelin.spark.SparkSqlInterpreter,org.apache.zeppelin.spark.DepInterpreter,org.apache.zeppelin.markdown.Markdown,org.apache.zeppelin.angular.AngularInterpreter,org.apache.zeppelin.shell.ShellInterpreter,org.apache.zeppelin.file.HDFSFileInterpreter,org.apache.zeppelin.flink.FlinkInterpreter,,org.apache.zeppelin.python.PythonInterpreter,org.apache.zeppelin.python.PythonInterpreterPandasSql,org.apache.zeppelin.python.PythonCondaInterpreter,org.apache.zeppelin.python.PythonDockerInterpreter,org.apache.zeppelin.lens.LensInterpreter,org.apache.zeppelin.ignite.IgniteInterpreter,org.apache.zeppelin.ignite.IgniteSqlInterpreter,org.apache.zeppelin.cassandra.CassandraInterpreter,org.apache.zeppelin.geode.GeodeOqlInterpreter,org.apache.zeppelin.postgresql.PostgreS
 qlInterpreter,org.apache.zeppelin.jdbc.JDBCInterpreter,org.apache.zeppelin.kylin.KylinInterpreter,org.apache.zeppelin.elasticsearch.ElasticsearchInterpreter,org.apache.zeppelin.scalding.ScaldingInterpreter,org.apache.zeppelin.alluxio.AlluxioInterpreter,org.apache.zeppelin.hbase.HbaseInterpreter,org.apache.zeppelin.livy.LivySparkInterpreter,org.apache.zeppelin.livy.LivyPySparkInterpreter,org.apache.zeppelin.livy.LivyPySpark3Interpreter,org.apache.zeppelin.livy.LivySparkRInterpreter,org.apache.zeppelin.livy.LivySparkSQLInterpreter,org.apache.zeppelin.bigquery.BigQueryInterpreter,org.apache.zeppelin.beam.BeamInterpreter,org.apache.zeppelin.pig.PigInterpreter,org.apache.zeppelin.pig.PigQueryInterpreter,org.apache.zeppelin.scio.ScioInterpreter,org.apache.metron.stellar.zeppelin.StellarInterpreter</value>
-      <description>Comma separated interpreter configurations. First interpreter become a default</description>
-    </property>
+    bin/install-interpreter.sh --name stellar --artifact org.apache.metron:stellar-zeppelin:0.4.3
     ```
 
 1. Start Zeppelin.  
 
     ```
-    $ZEPPELIN_HOME/bin/zeppelin-daemon.sh start
+    bin/zeppelin-daemon.sh start
     ```
 
-1. Navigate to Zeppelin running at [http://localhost:8080/](http://localhost:8080/).
-
-1. Register the Stellar interpreter in Zeppelin.
-
-    1. Click on the top-right menu item labelled "Anonymous" then choose "Interpreter" in the drop-down that opens.    
-
-1. Configure the Stellar interpreter.
-
-    1. Click on '**+ Create**' near the top-right.
-
-    1. Define the following values.
-        * **Interpreter Name** = `stellar`
-        * **Interpreter Group** = `stellar`
-
-    1. Under **Options**, set the following values.
-        * The interpreter will be instantiated **Per Note**  in **isolated** process.
-
-    1. Under **Dependencies**, define the following fields, then click the "+" icon.  Replace the Metron version as required.
-        * **Artifact** = `org.apache.metron:stellar-zeppelin:0.4.3`
-
-    1. Click "Save"
-
-1. Wait for the intrepreter to start.
-
-    1. Near the title '**stellar**', will be a status icon.  This will indicate that it is downloading the dependencies.  
-
-    1. Once the icon is shown as green, the interpreter is ready to work.
+1. Navigate to Zeppelin running at [http://localhost:8080/](http://localhost:8080/).  The Stellar Interpreter should be ready for use with a basic set of functions.
 
 Usage
 -----
@@ -141,19 +91,25 @@ Usage
 
 1. In the next block, check which functions are available to you.
 
+    When executing Stellar's magic functions, you must explicitly define which interpreter should be used in the code block.  If you defined 'stellar' as the default interpreter when creating the notebook, this is only required when using Stellar's magic functions.
+
     ```
+    %stellar
+
     %functions
     ```
 
     You will **only** 'see' the functions defined within `stellar-common` since that is the only library that we added to the interpreter.  
 
-1. To see how additional functions can be added, go back to the Stellar interpreter configuration and add another dependency as follows.
+1. Add additional Stellar functions to your session.
 
-    ```
-    org.apache.metron:metron-statistics:0.4.3
-    ```
+    1. Go back to the Stellar interpreter configuration and add another dependency as follows.
+
+        ```
+        org.apache.metron:metron-statistics:0.4.3
+        ```
 
-    Reload the Stellar interpreter and run `%functions` again.  You will see the additional functions defined within the `metron-statistics` project.
+    1. Go back to your notebook and run `%functions` again.  You will now see the additional functions defined within the `metron-statistics` project.
 
 1. Auto-completion is also available for Stellar expressions.  
 

http://git-wip-us.apache.org/repos/asf/metron/blob/ab4f8e65/metron-stellar/stellar-zeppelin/pom.xml
----------------------------------------------------------------------
diff --git a/metron-stellar/stellar-zeppelin/pom.xml b/metron-stellar/stellar-zeppelin/pom.xml
index 7809342..732c62b 100644
--- a/metron-stellar/stellar-zeppelin/pom.xml
+++ b/metron-stellar/stellar-zeppelin/pom.xml
@@ -35,6 +35,18 @@
             <version>${project.parent.version}</version>
         </dependency>
         <dependency>
+            <groupId>org.apache.metron</groupId>
+            <artifactId>metron-integration-test</artifactId>
+            <version>${project.parent.version}</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.metron</groupId>
+            <artifactId>metron-common</artifactId>
+            <version>${project.parent.version}</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
             <groupId>org.mockito</groupId>
             <artifactId>mockito-all</artifactId>
             <version>${global_mockito_version}</version>

http://git-wip-us.apache.org/repos/asf/metron/blob/ab4f8e65/metron-stellar/stellar-zeppelin/src/main/java/org/apache/metron/stellar/zeppelin/StellarInterpreter.java
----------------------------------------------------------------------
diff --git a/metron-stellar/stellar-zeppelin/src/main/java/org/apache/metron/stellar/zeppelin/StellarInterpreter.java b/metron-stellar/stellar-zeppelin/src/main/java/org/apache/metron/stellar/zeppelin/StellarInterpreter.java
index 58287dc..5a7a175 100644
--- a/metron-stellar/stellar-zeppelin/src/main/java/org/apache/metron/stellar/zeppelin/StellarInterpreter.java
+++ b/metron-stellar/stellar-zeppelin/src/main/java/org/apache/metron/stellar/zeppelin/StellarInterpreter.java
@@ -18,16 +18,6 @@
 
 package org.apache.metron.stellar.zeppelin;
 
-import static org.apache.zeppelin.interpreter.InterpreterResult.Code.ERROR;
-import static org.apache.zeppelin.interpreter.InterpreterResult.Code.SUCCESS;
-import static org.apache.zeppelin.interpreter.InterpreterResult.Type.TEXT;
-
-import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Optional;
-import java.util.Properties;
-
 import org.apache.commons.lang3.exception.ExceptionUtils;
 import org.apache.metron.stellar.common.shell.DefaultStellarAutoCompleter;
 import org.apache.metron.stellar.common.shell.DefaultStellarShellExecutor;
@@ -41,6 +31,16 @@ import org.apache.zeppelin.interpreter.thrift.InterpreterCompletion;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+import java.util.Properties;
+
+import static org.apache.zeppelin.interpreter.InterpreterResult.Code.ERROR;
+import static org.apache.zeppelin.interpreter.InterpreterResult.Code.SUCCESS;
+import static org.apache.zeppelin.interpreter.InterpreterResult.Type.TEXT;
+
 /**
  * A Zeppelin Interpreter for Stellar.
  */
@@ -65,16 +65,21 @@ public class StellarInterpreter extends Interpreter {
 
   public StellarInterpreter(Properties properties) {
     super(properties);
-    this.autoCompleter = new DefaultStellarAutoCompleter();
   }
 
   @Override
   public void open() {
     try {
-      executor = createExecutor();
+      // create the auto-completer
+      this.autoCompleter = new DefaultStellarAutoCompleter();
+
+      // create the stellar executor
+      Properties props = getProperty();
+      this.executor = createExecutor(props);
 
     } catch (Exception e) {
       LOG.error("Unable to create a StellarShellExecutor", e);
+      throw new RuntimeException(e);
     }
   }
 
@@ -85,36 +90,55 @@ public class StellarInterpreter extends Interpreter {
 
   @Override
   public InterpreterResult interpret(final String input, InterpreterContext context) {
-    InterpreterResult result;
+    InterpreterResult result = new InterpreterResult(SUCCESS, TEXT, "");
+
     try {
 
-      // execute the input
-      StellarResult stellarResult = executor.execute(input);
-      if(stellarResult.isSuccess()) {
+      // allow separate expressions on each line
+      String[] expressions = input.split(System.lineSeparator());
+      for (String expression : expressions) {
+        result = execute(expression);
+      }
+
+    } catch(Throwable t){
 
-        // on success - if no result, use a blank value
-        Object value = stellarResult.getValue().orElse("");
-        String text = value.toString();
-        result = new InterpreterResult(SUCCESS, TEXT, text);
+      // unexpected exception
+      String message = getErrorMessage(Optional.of(t), input);
+      result = new InterpreterResult(ERROR, TEXT, message);
+    }
 
-      } else if(stellarResult.isError()) {
+    // result is from the last expression that was executed
+    return result;
+  }
 
-        // an error occurred
-        Optional<Throwable> e = stellarResult.getException();
-        String message = getErrorMessage(e, input);
-        result = new InterpreterResult(ERROR, TEXT, message);
+  /**
+   * Execute a single Stellar expression.
+   * @param expression The Stellar expression to execute.
+   * @return The result of execution.
+   */
+  private InterpreterResult execute(final String expression) {
+    InterpreterResult result;
 
-      } else {
+    // execute the expression
+    StellarResult stellarResult = executor.execute(expression);
+    if (stellarResult.isSuccess()) {
 
-        // should never happen
-        throw new IllegalStateException("Unexpected error. result=" + stellarResult);
-      }
+      // on success - if no result, use a blank value
+      Object value = stellarResult.getValue().orElse("");
+      String text = value.toString();
+      result = new InterpreterResult(SUCCESS, TEXT, text);
 
-    } catch(Throwable t) {
+    } else if (stellarResult.isError()) {
 
-      // unexpected exception
-      String message = getErrorMessage(Optional.of(t), input);
+      // an error occurred
+      Optional<Throwable> e = stellarResult.getException();
+      String message = getErrorMessage(e, expression);
       result = new InterpreterResult(ERROR, TEXT, message);
+
+    } else {
+
+      // should never happen
+      throw new IllegalStateException("Unexpected error. result=" + stellarResult);
     }
 
     return result;
@@ -176,10 +200,11 @@ public class StellarInterpreter extends Interpreter {
    * Create an executor that will run the Stellar code for the Zeppelin Notebook.
    * @return The stellar executor.
    */
-  private StellarShellExecutor createExecutor() throws Exception {
+  private StellarShellExecutor createExecutor(Properties properties) throws Exception {
 
-    Properties props = getProperty();
-    StellarShellExecutor executor = new DefaultStellarShellExecutor(props, Optional.empty());
+    // a zookeeper URL may be defined
+    String zookeeperURL = StellarInterpreterProperty.ZOOKEEPER_URL.get(properties, String.class);
+    StellarShellExecutor executor = new DefaultStellarShellExecutor(properties, Optional.ofNullable(zookeeperURL));
 
     // register the auto-completer to be notified
     executor.addSpecialListener((magic) -> autoCompleter.addCandidateFunction(magic.getCommand()));
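
The per-line loop in `interpret` above means that each line of a notebook paragraph is executed as a separate Stellar expression, with only the last result displayed.  A hypothetical paragraph (mirroring the multi-line test further below):

```
%stellar

x := 2 + 2
y := 4 + 4
```

The paragraph's output is `8`, the result of the last expression; both `x` and `y` remain defined in the session.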

http://git-wip-us.apache.org/repos/asf/metron/blob/ab4f8e65/metron-stellar/stellar-zeppelin/src/main/java/org/apache/metron/stellar/zeppelin/StellarInterpreterProperty.java
----------------------------------------------------------------------
diff --git a/metron-stellar/stellar-zeppelin/src/main/java/org/apache/metron/stellar/zeppelin/StellarInterpreterProperty.java b/metron-stellar/stellar-zeppelin/src/main/java/org/apache/metron/stellar/zeppelin/StellarInterpreterProperty.java
new file mode 100644
index 0000000..7392219
--- /dev/null
+++ b/metron-stellar/stellar-zeppelin/src/main/java/org/apache/metron/stellar/zeppelin/StellarInterpreterProperty.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.stellar.zeppelin;
+
+import org.apache.metron.stellar.common.utils.ConversionUtils;
+
+import java.util.Map;
+
+/**
+ * Defines the properties that a user can define when configuring
+ * the Stellar Interpreter in Zeppelin.
+ */
+public enum StellarInterpreterProperty {
+
+  /**
+   * A property that defines the URL for connecting to Zookeeper.  By default this is empty.
+   */
+  ZOOKEEPER_URL("zookeeper.url", null);
+
+  /**
+   * The key or property name.
+   */
+  String key;
+
+  /**
+   * The default value of the property.
+   */
+  Object defaultValue;
+
+  StellarInterpreterProperty(String key, Object defaultValue) {
+    this.key = key;
+    this.defaultValue = defaultValue;
+  }
+
+  /**
+   * @return The key or name of the property.
+   */
+  public String getKey() {
+    return key;
+  }
+
+  /**
+   * @return The default value of the property.
+   */
+  public <T> T getDefault(Class<T> clazz) {
+    return ConversionUtils.convert(defaultValue, clazz);
+  }
+
+  /**
+   * Retrieves the property value from a map of properties.
+   * @param properties A map of properties.
+   * @return The value of the property within the map.
+   */
+  public <T> T get(Map<Object, Object> properties, Class<T> clazz) {
+    Object o = properties.getOrDefault(key, defaultValue);
+    return o == null ? null : ConversionUtils.convert(o, clazz);
+  }
+
+  @Override
+  public String toString() {
+    return key;
+  }
+}
+
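
As the tests that follow exercise, a property lookup is simply a map access plus type conversion, with a fallback to the default; a minimal usage sketch of the enum above (the quorum address is hypothetical):

```java
import java.util.Collections;
import java.util.Map;

import static org.apache.metron.stellar.zeppelin.StellarInterpreterProperty.ZOOKEEPER_URL;

public class PropertyLookupExample {
  public static void main(String[] args) {
    // a user-defined interpreter property; the address is made up
    Map<Object, Object> props = Collections.singletonMap("zookeeper.url", "node1:2181");

    // returns "node1:2181"; falls back to the default (null) when the key is absent
    String url = ZOOKEEPER_URL.get(props, String.class);
    System.out.println(url);
  }
}
```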

http://git-wip-us.apache.org/repos/asf/metron/blob/ab4f8e65/metron-stellar/stellar-zeppelin/src/test/java/org/apache/metron/stellar/zeppelin/StellarInterpreterPropertyTest.java
----------------------------------------------------------------------
diff --git a/metron-stellar/stellar-zeppelin/src/test/java/org/apache/metron/stellar/zeppelin/StellarInterpreterPropertyTest.java b/metron-stellar/stellar-zeppelin/src/test/java/org/apache/metron/stellar/zeppelin/StellarInterpreterPropertyTest.java
new file mode 100644
index 0000000..d474eaf
--- /dev/null
+++ b/metron-stellar/stellar-zeppelin/src/test/java/org/apache/metron/stellar/zeppelin/StellarInterpreterPropertyTest.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.stellar.zeppelin;
+
+import org.junit.Test;
+
+import java.util.Collections;
+import java.util.Map;
+
+import static org.apache.metron.stellar.zeppelin.StellarInterpreterProperty.ZOOKEEPER_URL;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Tests the StellarInterpreterProperty class.
+ */
+public class StellarInterpreterPropertyTest {
+
+  /**
+   * By defining the 'zookeeper.url' property a user is able to 'set' the Zookeeper URL.
+   */
+  @Test
+  public void testGet() {
+
+    // define the zookeeper URL property
+    final String expected = "zookeeper:2181";
+    Map<Object, Object> props = Collections.singletonMap("zookeeper.url", expected);
+
+    // should be able to get the zookeeper URL property from the properties
+    String actual = ZOOKEEPER_URL.get(props, String.class);
+    assertEquals(expected, actual);
+  }
+
+  /**
+   * The default value should be returned when the user does not define a 'zookeeper.url'.
+   */
+  @Test
+  public void testGetWhenPropertyNotDefined() {
+
+    // the property is not defined
+    Map<Object, Object> props = Collections.singletonMap("foo", "bar");
+    String actual = ZOOKEEPER_URL.get(props, String.class);
+
+    // expect to get the default value since it's not defined
+    String expected = ZOOKEEPER_URL.getDefault(String.class);
+    assertEquals(expected, actual);
+  }
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/ab4f8e65/metron-stellar/stellar-zeppelin/src/test/java/org/apache/metron/stellar/zeppelin/StellarInterpreterTest.java
----------------------------------------------------------------------
diff --git a/metron-stellar/stellar-zeppelin/src/test/java/org/apache/metron/stellar/zeppelin/StellarInterpreterTest.java b/metron-stellar/stellar-zeppelin/src/test/java/org/apache/metron/stellar/zeppelin/StellarInterpreterTest.java
index 363938e..c817747 100644
--- a/metron-stellar/stellar-zeppelin/src/test/java/org/apache/metron/stellar/zeppelin/StellarInterpreterTest.java
+++ b/metron-stellar/stellar-zeppelin/src/test/java/org/apache/metron/stellar/zeppelin/StellarInterpreterTest.java
@@ -17,22 +17,29 @@
  */
 package org.apache.metron.stellar.zeppelin;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.mock;
-
 import com.google.common.collect.Iterables;
 import org.apache.commons.lang3.StringUtils;
+import org.apache.metron.stellar.common.shell.VariableResult;
+import org.apache.metron.stellar.dsl.Context;
 import org.apache.zeppelin.interpreter.InterpreterContext;
 import org.apache.zeppelin.interpreter.InterpreterResult;
 import org.apache.zeppelin.interpreter.InterpreterResultMessage;
 import org.apache.zeppelin.interpreter.thrift.InterpreterCompletion;
 import org.junit.Before;
+import org.junit.Ignore;
 import org.junit.Test;
 
 import java.util.List;
+import java.util.Map;
+import java.util.Optional;
 import java.util.Properties;
 
+import static org.apache.metron.stellar.zeppelin.StellarInterpreterProperty.ZOOKEEPER_URL;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+
 /**
  * Tests the StellarInterpreter.
  */
@@ -189,4 +196,49 @@ public class StellarInterpreterTest {
     // expect no completions
     assertEquals(0, completions.size());
   }
+
+  /**
+   * No Zookeeper client connection should be made if the Zookeeper URL is not defined.
+   */
+  @Test
+  public void testOpenWithNoZookeeperURL() {
+
+    // no zookeeper URL defined
+    Properties props = new Properties();
+
+    // open the interpreter
+    interpreter = new StellarInterpreter(props);
+    interpreter.open();
+
+    // no zookeeper client should be defined
+    Optional<Object> zk = interpreter.getExecutor().getContext().getCapability(Context.Capabilities.ZOOKEEPER_CLIENT, false);
+    assertFalse(zk.isPresent());
+  }
+
+  /**
+   * Ensure that we can run Stellar code in the interpreter.
+   */
+  @Test
+  public void testExecuteStellarMultipleLines() {
+
+    // multi-line input
+    String input =
+            "x := 2 + 2" + System.lineSeparator() +
+            "y := 4 + 4";
+    InterpreterResult result = interpreter.interpret(input, context);
+
+    // expect x == 4 and y == 8
+    Map<String, VariableResult> vars = interpreter.getExecutor().getState();
+    assertEquals(4, vars.get("x").getResult());
+    assertEquals(8, vars.get("y").getResult());
+
+    // validate the result
+    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+    assertEquals(1, result.message().size());
+
+    // the output is the result of only the 'last' expression
+    InterpreterResultMessage message = result.message().get(0);
+    assertEquals("8", message.getData());
+    assertEquals(InterpreterResult.Type.TEXT, message.getType());
+  }
 }

http://git-wip-us.apache.org/repos/asf/metron/blob/ab4f8e65/metron-stellar/stellar-zeppelin/src/test/java/org/apache/metron/stellar/zeppelin/integration/ConfigUploadComponent.java
----------------------------------------------------------------------
diff --git a/metron-stellar/stellar-zeppelin/src/test/java/org/apache/metron/stellar/zeppelin/integration/ConfigUploadComponent.java b/metron-stellar/stellar-zeppelin/src/test/java/org/apache/metron/stellar/zeppelin/integration/ConfigUploadComponent.java
new file mode 100644
index 0000000..9257e62
--- /dev/null
+++ b/metron-stellar/stellar-zeppelin/src/test/java/org/apache/metron/stellar/zeppelin/integration/ConfigUploadComponent.java
@@ -0,0 +1,82 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+package org.apache.metron.stellar.zeppelin.integration;
+
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.imps.CuratorFrameworkState;
+import org.apache.metron.integration.InMemoryComponent;
+import org.apache.metron.integration.UnableToStartException;
+
+import java.util.Map;
+
+import static org.apache.metron.common.configuration.ConfigurationsUtils.getClient;
+import static org.apache.metron.common.configuration.ConfigurationsUtils.writeGlobalConfigToZookeeper;
+
+/**
+ * Uploads configuration to Zookeeper.
+ */
+public class ConfigUploadComponent implements InMemoryComponent {
+
+  private String zookeeperURL;
+  private Map<String, Object> globals;
+
+  @Override
+  public void start() throws UnableToStartException {
+    try {
+      upload();
+
+    } catch (Exception e) {
+      throw new UnableToStartException(e.getMessage(), e);
+    }
+  }
+
+  @Override
+  public void stop() {
+    // nothing to do
+  }
+
+  /**
+   * Uploads configuration to Zookeeper.
+   * @throws Exception
+   */
+  private void upload() throws Exception {
+    assert zookeeperURL != null;
+    try(CuratorFramework client = getClient(zookeeperURL)) {
+      if(client.getState() != CuratorFrameworkState.STARTED) {
+        client.start();
+      }
+
+      if (globals != null) {
+        writeGlobalConfigToZookeeper(globals, client);
+      }
+    }
+  }
+
+
+  public ConfigUploadComponent withZookeeperURL(String zookeeperURL) {
+    this.zookeeperURL = zookeeperURL;
+    return this;
+  }
+
+  public ConfigUploadComponent withGlobals(Map<String, Object> globals) {
+    this.globals = globals;
+    return this;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/metron/blob/ab4f8e65/metron-stellar/stellar-zeppelin/src/test/java/org/apache/metron/stellar/zeppelin/integration/StellarInterpreterIntegrationTest.java
----------------------------------------------------------------------
diff --git a/metron-stellar/stellar-zeppelin/src/test/java/org/apache/metron/stellar/zeppelin/integration/StellarInterpreterIntegrationTest.java b/metron-stellar/stellar-zeppelin/src/test/java/org/apache/metron/stellar/zeppelin/integration/StellarInterpreterIntegrationTest.java
new file mode 100644
index 0000000..b6395eb
--- /dev/null
+++ b/metron-stellar/stellar-zeppelin/src/test/java/org/apache/metron/stellar/zeppelin/integration/StellarInterpreterIntegrationTest.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.stellar.zeppelin.integration;
+
+import org.apache.metron.integration.BaseIntegrationTest;
+import org.apache.metron.integration.ComponentRunner;
+import org.apache.metron.integration.components.ZKServerComponent;
+import org.apache.metron.stellar.dsl.Context;
+import org.apache.metron.stellar.zeppelin.StellarInterpreter;
+import org.apache.zeppelin.interpreter.InterpreterContext;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Properties;
+
+import static org.apache.metron.stellar.zeppelin.StellarInterpreterProperty.ZOOKEEPER_URL;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+
+/**
+ * An integration test for the StellarInterpreter.
+ */
+public class StellarInterpreterIntegrationTest extends BaseIntegrationTest {
+
+  private StellarInterpreter interpreter;
+  private InterpreterContext context;
+  private Properties properties;
+  private String zookeeperURL;
+  private ZKServerComponent zkServer;
+  private ComponentRunner runner;
+
+  @Before
+  public void setup() throws Exception {
+
+    // a component that uploads the global configuration
+    Map<String, Object> globals = new HashMap<>();
+    ConfigUploadComponent configUploader = new ConfigUploadComponent()
+            .withGlobals(globals);
+
+    // create zookeeper component
+    properties = new Properties();
+    zkServer = getZKServerComponent(properties);
+
+    // can only get the zookeeperUrl AFTER it has started
+    zkServer.withPostStartCallback((zk) -> {
+      zookeeperURL = zk.getConnectionString();
+      configUploader.withZookeeperURL(zookeeperURL);
+    });
+
+    // start the integration test components
+    runner = new ComponentRunner.Builder()
+            .withComponent("zk", zkServer)
+            .withComponent("config", configUploader)
+            .build();
+    runner.start();
+
+    context = mock(InterpreterContext.class);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    runner.stop();
+  }
+
+  /**
+   * A user should be able to define a Zookeeper URL as a property.  When this property
+   * is defined, a connection to Zookeeper is created and available in the Stellar session.
+   */
+  @Test
+  public void testOpenWithZookeeperURL() {
+
+    // define a zookeeper URL
+    Properties props = new Properties();
+    props.put(ZOOKEEPER_URL.toString(), zookeeperURL);
+
+    // open the interpreter
+    interpreter = new StellarInterpreter(props);
+    interpreter.open();
+
+    // a zookeeper client should be defined
+    Optional<Object> zk = interpreter.getExecutor().getContext().getCapability(Context.Capabilities.ZOOKEEPER_CLIENT, false);
+    assertTrue(zk.isPresent());
+  }
+
+}


[07/50] [abbrv] metron git commit: METRON-1487 Define Performance Benchmarks for Enrichment Topology (nickwallen) closes apache/metron#961

Posted by rm...@apache.org.
METRON-1487 Define Performance Benchmarks for Enrichment Topology (nickwallen) closes apache/metron#961


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/e3eeec38
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/e3eeec38
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/e3eeec38

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: e3eeec38a66e1f10c296c110d47bc8bc3e995629
Parents: 52dd9fb
Author: nickwallen <ni...@nickallen.org>
Authored: Sat Mar 17 09:22:15 2018 -0400
Committer: nickallen <ni...@apache.org>
Committed: Sat Mar 17 09:22:15 2018 -0400

----------------------------------------------------------------------
 metron-platform/Performance-tuning-guide.md     |   2 +
 .../metron-enrichment/Performance.md            | 514 +++++++++++++++++++
 2 files changed, 516 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/e3eeec38/metron-platform/Performance-tuning-guide.md
----------------------------------------------------------------------
diff --git a/metron-platform/Performance-tuning-guide.md b/metron-platform/Performance-tuning-guide.md
index bc8801b..7d79ace 100644
--- a/metron-platform/Performance-tuning-guide.md
+++ b/metron-platform/Performance-tuning-guide.md
@@ -422,6 +422,7 @@ modifying the options outlined above, increasing the poll timeout, or both.
 
 ## Reference
 
+* [Enrichment Performance](metron-enrichment/Performance.md)
 * http://storm.apache.org/releases/1.0.1/flux.html
 * https://stackoverflow.com/questions/17257448/what-is-the-task-in-storm-parallelism
 * http://storm.apache.org/releases/current/Understanding-the-parallelism-of-a-Storm-topology.html
@@ -429,3 +430,4 @@ modifying the options outlined above, increasing the poll timeout, or both.
 * https://www.confluent.io/blog/how-to-choose-the-number-of-topicspartitions-in-a-kafka-cluster/
 * https://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.6.1/bk_storm-component-guide/content/storm-kafkaspout-perf.html
 
+

http://git-wip-us.apache.org/repos/asf/metron/blob/e3eeec38/metron-platform/metron-enrichment/Performance.md
----------------------------------------------------------------------
diff --git a/metron-platform/metron-enrichment/Performance.md b/metron-platform/metron-enrichment/Performance.md
new file mode 100644
index 0000000..4016a0d
--- /dev/null
+++ b/metron-platform/metron-enrichment/Performance.md
@@ -0,0 +1,514 @@
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+# Enrichment Performance
+
+This guide defines a set of benchmarks used to measure the performance of the Enrichment topology.  The guide also provides detailed steps on how to execute those benchmarks along with advice for tuning the Unified Enrichment topology.
+
+* [Benchmarks](#benchmarks)
+* [Benchmark Execution](#benchmark-execution)
+* [Performance Tuning](#performance-tuning)
+* [Benchmark Results](#benchmark-results)
+
+## Benchmarks
+
+The following section describes a set of enrichments that will be used to benchmark the performance of the Enrichment topology.
+
+* [Geo IP Enrichment](#geo-ip-enrichment)
+* [HBase Enrichment](#hbase-enrichment)
+* [Stellar Enrichment](#stellar-enrichment)
+
+### Geo IP Enrichment
+
+This benchmark measures the performance of executing a Geo IP enrichment.  Given a valid IP address, the enrichment appends detailed location information for that IP.  The location information is sourced from an external Geo IP data source like [Maxmind](https://github.com/maxmind/GeoIP2-java).
+
+#### Configuration
+
+Adding the following Stellar expression to the Enrichment topology configuration will define a Geo IP enrichment.
+```
+geo := GEO_GET(ip_dst_addr)
+```
+
+After the enrichment process completes, the telemetry message will contain a set of fields with location information for the given IP address.
+```
+{
+   "ip_dst_addr":"151.101.129.140",
+   ...
+   "geo.city":"San Francisco",
+   "geo.country":"US",
+   "geo.dmaCode":"807",
+   "geo.latitude":"37.7697",
+   "geo.location_point":"37.7697,-122.3933",
+   "geo.locID":"5391959",
+   "geo.longitude":"-122.3933",
+   "geo.postalCode":"94107",
+ }
+```
+
+### HBase Enrichment
+
+This benchmark measures the performance of executing an enrichment that retrieves data from an external HBase table. This type of enrichment is useful for enriching telemetry from an Asset Database or other source of relatively static data.
+
+#### Configuration
+
+Adding the following Stellar expression to the Enrichment topology configuration will define an HBase enrichment.  This looks up the 'ip_dst_addr' within the HBase table 'top-1m' (column family 't') and returns the associated hostname.
+```
+top1m := ENRICHMENT_GET('top-1m', ip_dst_addr, 'top-1m', 't')
+```
+
+After the telemetry has been enriched, it will contain the host and IP elements that were retrieved from the HBase table.
+```
+{
+	"ip_dst_addr":"151.101.2.166",
+	...
+	"top1m.host":"earther.com",
+	"top1m.ip":"151.101.2.166"
+}
+```
+
+### Stellar Enrichment
+
+This benchmark measures the performance of executing a basic Stellar expression.  In this benchmark, the enrichment is purely a computational task that has no dependence on an external system like a database.  
+
+#### Configuration
+
+Adding the following Stellar expression to the Enrichment topology configuration will define a basic Stellar enrichment.  The following returns true if the IP is in the given subnet and false otherwise.
+```
+local := IN_SUBNET(ip_dst_addr, '192.168.0.0/24')
+```
+
+After the telemetry has been enriched, it will contain a field with a boolean value indicating whether the IP was within the given subnet.
+```
+{
+	"ip_dst_addr":"151.101.2.166",
+	...
+	"local":false
+}
+```
+
+## Benchmark Execution
+
+This section describes the steps necessary to execute the performance benchmarks for the Enrichment topology.
+
+* [Prepare Enrichment Data](#prepare-enrichment-data)
+* [Load HBase with Enrichment Data](#load-hbase-with-enrichment-data)
+* [Configure the Enrichments](#configure-the-enrichments)
+* [Create Input Telemetry](#create-input-telemetry)
+* [Cluster Setup](#cluster-setup)
+* [Monitoring](#monitoring)
+
+### Prepare Enrichment Data
+
+The Alexa Top 1 Million was used as a data source for these benchmarks.
+
+1. Download the [Alexa Top 1 Million](http://s3.amazonaws.com/alexa-static/top-1m.csv.zip) or another similar data set with a variety of valid hostnames.
+
+2. For each hostname, query DNS to retrieve an associated IP address.  
+
+	A script like the following can be used for this.  There is no need to do this for all 1 million entries in the data set. Doing this for around 10,000 records is sufficient.
+        
+	```python
+	import dns.resolver
+	import csv
+	#
+	resolver = dns.resolver.Resolver()
+	resolver.nameservers = ['8.8.8.8', '8.8.4.4']
+	#
+	with open('top-1m.csv', 'r') as infile:
+	  with open('top-1m-with-ip.csv', 'w') as outfile:
+	    #
+	    reader = csv.reader(infile, delimiter=',')
+	    writer = csv.writer(outfile, delimiter=',')
+	    for row in reader:
+	      #
+	      host = row[1]
+	      try:
+	        response = resolver.query(host, "A")
+	        for record in response:
+	          ip = record
+	          writer.writerow([host, ip])
+	          print "host={}, ip={}".format(host, ip)
+	        #
+	      except:
+	        pass
+	```
+
+3. The resulting data set contains an IP to hostname mapping.
+	```bash
+	$ head top-1m-with-ip.csv
+	google.com,172.217.9.46
+	youtube.com,172.217.4.78
+	facebook.com,157.240.18.35
+	baidu.com,220.181.57.216
+	baidu.com,111.13.101.208
+	baidu.com,123.125.114.144
+	wikipedia.org,208.80.154.224
+	yahoo.com,98.139.180.180
+	yahoo.com,206.190.39.42
+	reddit.com,151.101.1.140
+	```
+
+### Load HBase with Enrichment Data
+
+1. Create an HBase table for this data.  
+
+	Ensure that the table is evenly distributed across the HBase nodes.  This can be done by pre-splitting the table or splitting the data after loading it.  
+
+	```
+	create 'top-1m', 't', {SPLITS => ['2','4','6','8','a','c','e']}
+	```
+
+1. Create a configuration file called `extractor.json`.  This defines how the data will be loaded into the HBase table.
+
+	```bash
+	> cat extractor.json
+	{
+	    "config": {
+	        "columns": {
+	            "host" : 0,
+	            "ip": 1
+	        },
+	        "indicator_column": "ip",
+	        "type": "top-1m",
+	        "separator": ","
+	    },
+	    "extractor": "CSV"
+	}
+	```
+
+1. Use the `flatfile_loader.sh` to load the data into the HBase table.
+	```
+	$METRON_HOME/bin/flatfile_loader.sh \
+		-e extractor.json \
+		-t top-1m \
+		-c t \
+		-i top-1m-with-ip.csv
+	```
+
+### Configure the Enrichments
+
+1. Define the Enrichments using the REPL.
+
+	```
+	> $METRON_HOME/bin/stellar -z $ZOOKEEPER
+	Stellar, Go!
+	[Stellar]>>> conf
+	{
+	  "enrichment": {
+	    "fieldMap": {
+	     "stellar" : {
+	       "config" : {
+	         "geo" : "GEO_GET(ip_dst_addr)",
+	         "top1m" : "ENRICHMENT_GET('top-1m', ip_dst_addr, 'top-1m', 't')",
+	         "local" : "IN_SUBNET(ip_dst_addr, '192.168.0.0/24')"
+	       }
+	     }
+	    },
+	    "fieldToTypeMap": {
+	    }
+	  },
+	  "threatIntel": {
+	  }
+	}
+	[Stellar]>>> CONFIG_PUT("ENRICHMENT", conf, "asa")
+	```
+
+### Create Input Telemetry
+
+1.  Create a template file that defines what your input telemetry will look like.
+
+	```bash
+	> cat asa.template
+	{"ciscotag": "ASA-1-123123", "source.type": "asa", "ip_dst_addr": "$DST_ADDR", "original_string": "<134>Feb 22 17:04:43 AHOSTNAME %ASA-1-123123: Built inbound ICMP connection for faddr 192.168.11.8/50244 gaddr 192.168.1.236/0 laddr 192.168.1.1/161", "ip_src_addr": "192.168.1.35", "syslog_facility": "local1", "action": "built", "syslog_host": "AHOSTNAME", "timestamp": "$METRON_TS", "protocol": "icmp", "guid": "$METRON_GUID", "syslog_severity": "info"}
+	```
+
+2.  Use the template file along with the enrichment data to create input telemetry with varying IP addresses.
+
+	```bash
+	for i in $(head top-1m-with-ip.csv | awk -F, '{print $2}');do
+		cat asa.template | sed "s/\$DST_ADDR/$i/";
+	done > asa.input.template
+	```
+
+3. Use the `load_tool.sh` script to push messages onto the input topic `enrichments` and monitor the output topic `indexing`.  See more information in the Performance [README.md](metron-contrib/metron-performance/README.md).
+
+	If the topology is keeping up, the rate of events per second on the output topic should roughly match the rate on the input topic.
+
+### Cluster Setup
+
+#### Isolation
+
+The Enrichment topology depends on at least two, and often three, components working together: Storm, Kafka, and HBase.  When any two of these run on the same node, it can be difficult to identify which is becoming the bottleneck.  This can cause poor and highly volatile performance as each steals resources from the others.  
+
+It is highly recommended that each of these systems be fully isolated from the others.  Storm should be run on nodes that are completely isolated from Kafka and HBase.
+
+### Monitoring
+
+1. The `load_tool.sh` script will report the throughput for the input and output topics.  
+
+	* The input throughput should roughly match the output throughput if the topology is able to handle a given load.
+
+	* Not only are the raw throughput numbers important, but also the consistency of what is reported over time.  If the reported throughput is sporadic, then further tuning may be required.
+
+1. The Storm UI is an important source of information.  The bolt capacity, complete latency, and any reported errors are all important to monitor.
+
+1. The load reported by the OS is also an important metric to monitor.  
+
+	* The load metric should be monitored to ensure that each node is being pushed sufficiently, but not too much.
+
+	* The load should be evenly distributed across each node.  If the load is uneven, this may indicate a problem.
+
+	A simple script like the following is sufficient for the task.
+
+	```
+	for host in $(cat cluster.txt); do
+	  echo $host;
+	  ssh root@$host 'uptime';
+	done
+	```
+
+1. Monitoring the Kafka offset lags indicates how far behind a consumer may be.  This can be very useful to determine if the topology is keeping up.
+
+	```
+	${KAFKA_HOME}/bin/kafka-consumer-groups.sh \
+	    --command-config=/tmp/consumergroup.config \
+	    --describe \
+	    --group enrichments \
+	    --bootstrap-server $BROKERLIST \
+	    --new-consumer
+	```
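+
+	To watch the total lag rather than per-partition values, the output can be summed with `awk`.  This is a sketch that assumes the lag appears in the fifth column; the column layout varies across Kafka versions, so check the header first.
+
+	```
+	${KAFKA_HOME}/bin/kafka-consumer-groups.sh \
+	    --command-config=/tmp/consumergroup.config \
+	    --describe \
+	    --group enrichments \
+	    --bootstrap-server $BROKERLIST \
+	    --new-consumer 2>/dev/null \
+	  | awk 'NR > 1 { lag += $5 } END { print "total lag: " lag }'
+	```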
+
+1. A tool like [Kafka Manager](https://github.com/yahoo/kafka-manager) is also very useful for monitoring the input and output topics during test execution.
+
+## Performance Tuning
+
+The approach to tuning the topology will look something like the following.  More detailed tuning information is available under each named parameter below.
+
+* Start the tuning process with a single worker.  After tuning the bolts within a single worker, scale out with additional worker processes.
+
+* Initially set the thread pool size to 1.  Increase this value slowly only after tuning the other parameters first.  Consider that each worker has its own thread pool, and the total size across all of these thread pools should be far less than the total number of cores available in the cluster.
+
+* Initially set each bolt parallelism hint to the number of partitions on the input Kafka topic.  Monitor bolt capacity and increase the parallelism hint for any bolt whose capacity is close to or exceeds 1.  
+
+* If the topology is not able to keep up with a given input, then increasing the parallelism is the primary means to scale up.
+
+* Parallelism units can be used to determine how to distribute processing tasks across the topology.  The sum of parallelism across all components can be close to, but should not far exceed, this value (a worked example follows this list).
+
+	 (number of worker nodes in cluster * number cores per worker node) - (number of acker tasks)
+
+* The throughput that the topology is able to sustain should be relatively consistent.  If the throughput fluctuates greatly, increase back pressure using [`topology.max.spout.pending`](#topologymaxspoutpending).
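+
+As a concrete illustration of the parallelism unit arithmetic, consider a hypothetical cluster of 3 worker nodes with 32 cores each and 9 acker tasks:
+
+```bash
+# hypothetical cluster: 3 worker nodes x 32 cores each, 9 acker tasks
+NODES=3; CORES=32; ACKERS=9
+UNITS=$(( (NODES * CORES) - ACKERS ))
+echo "sum of parallelism should stay near, not far above, $UNITS"   # 87
+```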
+
+### Parameters
+
+The following parameters are useful for tuning the "Unified" Enrichment topology.  
+
+WARNING: Some of the parameter names have been reused from the "Split/Join" topology, so the names may not be appropriate.  This will be corrected in the future.
+
+* [`enrichment.workers`](#enrichmentworkers)
+* [`enrichment.acker.executors`](#enrichmentackerexecutors)
+* [`topology.worker.childopts`](#topologyworkerchildopts)
+* [`topology.max.spout.pending`](#topologymaxspoutpending)
+* [`kafka.spout.parallelism`](#kafkaspoutparallelism)
+* [`enrichment.join.parallelism`](#enrichmentjoinparallelism)
+* [`threat.intel.join.parallelism`](#threatinteljoinparallelism)
+* [`kafka.writer.parallelism`](#kafkawriterparallelism)
+* [`enrichment.join.cache.size`](#enrichmentjoincachesize)
+* [`threat.intel.join.cache.size`](#threatinteljoincachesize)
+* [`metron.threadpool.size`](#metronthreadpoolsize)
+* [`metron.threadpool.type`](#metronthreadpooltype)
+
+#### `enrichment.workers`
+
+The number of worker processes for the enrichment topology.
+
+* Start by tuning only a single worker.  Maximize throughput for that worker, then increase the number of workers.
+
+* The throughput should scale relatively linearly as workers are added.  This reaches a limit once the workers running on a single node saturate the available resources.  When this happens, adding workers on additional nodes should allow further scaling.
+
+* Increase parallelism before attempting to increase the number of workers.
+
+#### `enrichment.acker.executors`
+
+The number of ackers within the topology.
+
+* This should most often be equal to the number of workers defined in `enrichment.workers`.
+
+* Within the Storm UI, click the "Show System Stats" button.  This will display a bolt named `__acker`.  If the capacity of this bolt is too high, then increase the number of ackers.
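+
+* The same capacity figures are exposed by the Storm UI REST API, which is convenient for scripting.  A minimal sketch, assuming the UI listens on `storm-ui:8744` (host and port are placeholders for your environment) and that `jq` is installed:
+
+	```
+	# sys=true asks for system bolts such as __acker to be included
+	TOPOLOGY_ID=$(curl -s "http://storm-ui:8744/api/v1/topology/summary" | jq -r '.topologies[0].id')
+	curl -s "http://storm-ui:8744/api/v1/topology/$TOPOLOGY_ID?sys=true" \
+	  | jq -r '.bolts[] | "\(.boltId) capacity=\(.capacity)"'
+	```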
+
+#### `topology.worker.childopts`
+
+This parameter accepts arguments that will be passed to the JVM created for each Storm worker.  This allows for control over the heap size, garbage collection, and any other JVM-specific parameter.
+
+* Start with a 2G heap and increase as needed.  Running with 8G was found to be beneficial, but will vary depending on caching needs.
+
+    `-Xms8g -Xmx8g`
+
+* The Garbage First Garbage Collector (G1GC) is recommended along with a cap on the amount of time spent in garbage collection.  This is intended to help address small object allocation issues due to our extensive use of caches.
+
+    `-XX:+UseG1GC -XX:MaxGCPauseMillis=100`
+
+* If the caches in use are very large (as defined by either [`enrichment.join.cache.size`](#enrichmentjoincachesize) or [`threat.intel.join.cache.size`](#threatinteljoincachesize)) and performance is poor, turning on garbage collection logging might be helpful.
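+
+* For example, GC logging flags such as the following could be appended to the worker childopts.  This is a sketch; the log path is an assumption, and Storm substitutes `%ID%` with the worker's port.
+
+    `-Xloggc:/var/log/storm/gc-worker-%ID%.log -XX:+PrintGCDetails -XX:+PrintGCDateStamps`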
+
+#### `topology.max.spout.pending`
+
+This limits the number of unacked tuples that the spout can introduce into the topology.
+
+* Decreasing this value will increase back pressure and allow the topology to consume messages at a pace that is maintainable.
+
+* If the spout throws 'Commit Failed Exceptions' then the topology is not keeping up.  Decreasing this value is one way to ensure that messages can be processed before they time out.
+
+* If the topology's throughput is unsteady and inconsistent, decrease this value.  This should help the topology consume messages at a maintainable pace.
+
+* If the bolt capacity is low, the topology can handle additional load.  Increase this value so that more tuples are introduced into the topology, which should increase the bolt capacity.
+
+#### `kafka.spout.parallelism`
+
+The parallelism of the Kafka spout within the topology.  Defines the maximum number of executors for each worker dedicated to running the spout.
+
+* The spout parallelism should most often be set to the number of partitions of the input Kafka topic.
+
+* If the enrichment bolt capacity is low, increasing the parallelism of the spout can introduce additional load on the topology.
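+
+* Since the spout parallelism is tied to the partition count, it is worth confirming how many partitions the input topic actually has.  A minimal sketch using the standard Kafka CLI, reusing the variables from the monitoring section above:
+
+	```
+	${KAFKA_HOME}/bin/kafka-topics.sh \
+	    --describe \
+	    --topic enrichments \
+	    --zookeeper $ZOOKEEPER
+	```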
+
+####  `enrichment.join.parallelism`
+
+The parallelism hint for the enrichment bolt.  Defines the maximum number of executors within each worker dedicated to running the enrichment bolt.
+
+WARNING: The property name does not match its current usage in the Unified topology.  This property name may change in the near future as it has been reused from the Split-Join topology.  
+
+* If the capacity of the enrichment bolt is high, increasing the parallelism will introduce additional executors to bring the bolt capacity down.
+
+* If the throughput of the topology is too low, increase this value.  This allows additional tuples to be enriched in parallel.
+
+* Increasing parallelism on the enrichment bolt will at some point put pressure on the downstream threat intel and output bolts.  As this value is increased, monitor the capacity of the downstream bolts to ensure that they do not become a bottleneck.
+
+#### `threat.intel.join.parallelism`
+
+The parallelism hint for the threat intel bolt.  Defines the maximum number of executors within each worker dedicated to running the threat intel bolt.
+
+WARNING: The property name does not match its current usage in the Unified topology.  This property name may change in the near future as it has been reused from the Split-Join topology.  
+
+* If the capacity of the threat intel bolt is high, increasing the parallelism will introduce additional executors to bring the bolt capacity down.
+
+* If the throughput of the topology is too low, increase this value.  This allows additional tuples to be enriched in parallel.
+
+* Increasing parallelism on this bolt will at some point put pressure on the downstream output bolt.  As this value is increased, monitor the capacity of the output bolt to ensure that it does not become a bottleneck.
+
+#### `kafka.writer.parallelism`
+
+The parallelism hint for the output bolt which writes to the output Kafka topic.  Defines the maximum number of executors within each worker dedicated to running the output bolt.
+
+* If the capacity of the output bolt is high, increasing the parallelism will introduce additional executors to bring the bolt capacity down.
+
+#### `enrichment.join.cache.size`
+
+The Enrichment bolt maintains a cache so that if the same enrichment occurs repetitively, the value can be retrieved from the cache instead of it being recomputed.  
+
+There is a great deal of repetition in network telemetry, which leads to a great deal of repetition for the enrichments that operate on that telemetry.  Having a highly performant cache is one of the most critical factors driving performance.
+
+WARNING: The property name does not match its current usage in the Unified topology.  This property name may change in the near future as it has been reused from the Split-Join topology.  
+
+* Increase the size of the cache to improve the rate of cache hits.
+
+* Increasing the size of the cache may require that you increase the worker heap size using `topology.worker.childopts`.  
+
+#### `threat.intel.join.cache.size`
+
+The Threat Intel bolt maintains a cache so that if the same enrichment occurs repetitively, the value can be retrieved from the cache instead of it being recomputed.  
+
+There is a great deal of repetition in network telemetry, which leads to a great deal of repetition for the enrichments that operate on that telemetry.  Having a highly performant cache is one of the most critical factors driving performance.
+
+WARNING: The property name does not match its current usage in the Unified topology.  This property name may change in the near future as it has been reused from the Split-Join topology.  
+
+* Increase the size of the cache to improve the rate of cache hits.
+
+* Increasing the size of the cache may require that you increase the worker heap size using `topology.worker.childopts`.  
+
+#### `metron.threadpool.size`
+
+This value defines the number of threads maintained within a pool to execute each enrichment.  This value can be either a fixed number or a multiple of the number of cores (for example, `5C` means 5 times the number of cores).
+
+The enrichment bolt maintains a static thread pool that is used to execute each enrichment.  This thread pool is shared by all of the executors running within the same worker.  
+
+WARNING: This value must be manually defined within the flux file at `$METRON_HOME/flux/enrichment/remote-unified.yaml`.  This value cannot be altered within Ambari at this time.
+
+* Start with a thread pool size of 1.  Adjust this value after tuning all other parameters first.  Only increase this value if testing shows performance improvements in your environment given your workload.  
+
+* If the thread pool size is too large, the work will be shuffled amongst multiple CPU cores, which significantly decreases performance.  Using a smaller thread pool helps pin work to a single core.
+
+* If the thread pool size is too small, IO-intensive workloads can be negatively impacted.  Increasing the thread pool size helps when running IO-intensive workloads with a significant cache miss rate; a thread pool size of 3-5 can help in these cases.
+
+* Most workloads will make significant use of the cache and so 1-2 threads will most likely be optimal.
+
+* The bolt uses a static thread pool.  To scale out, but keep the work mostly pinned to a CPU core, add more Storm workers while keeping the thread pool size low.
+
+* If a larger thread pool increases load on the system, but decreases the throughput, then it is likely that the system is thrashing.  In this case the thread pool size should be decreased.
+
+#### `metron.threadpool.type`
+
+The enrichment bolt maintains a static thread pool that is used to execute each enrichment.  This thread pool is shared by all of the executors running within the same worker.  
+
+Defines the type of thread pool used.  This value can be either "FIXED" or "WORK_STEALING".
+
+Currently, this value must be manually defined within the flux file at `$METRON_HOME/flux/enrichment/remote-unified.yaml`.  This value cannot be altered within Ambari.
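+
+Conceptually, the two options mirror the two thread pool flavors available in the JDK.  The following is an illustrative sketch of that distinction only, not the topology's actual wiring:
+
+```java
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+public class PoolTypes {
+  public static void main(String[] args) {
+    // FIXED: a fixed set of threads sharing a single queue; work tends
+    // to stay pinned to fewer cores, which favors cache-heavy workloads
+    ExecutorService fixed = Executors.newFixedThreadPool(2);
+
+    // WORK_STEALING: a ForkJoinPool in which idle threads steal queued
+    // work from busy ones; can help uneven, IO-heavy workloads
+    ExecutorService stealing = Executors.newWorkStealingPool(2);
+
+    fixed.shutdown();
+    stealing.shutdown();
+  }
+}
+```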
+
+### Benchmark Results
+
+This section describes one execution of these benchmarks to help provide an understanding of what reasonably tuned parameters might look like.  
+
+These parameters and the throughput reported are highly dependent on the workload and resources available. The throughput is what was achievable given a reasonable amount of tuning on a small, dedicated cluster.  The throughput is largely dependent on the enrichments performed and the distribution of data within the incoming telemetry.
+
+The Enrichment topology has been shown to scale relatively linearly.  Adding more resources allows for more complex enrichments, across more diverse data sets, at higher volumes.  The throughput that one might see in production largely depends on how much hardware can be committed to the task.  
+
+#### Environment
+
+* Apache Metron 0.4.3 (pre-release) March, 2018
+	* This included [a patch to the underlying caching mechanism](https://github.com/apache/metron/pull/947) that greatly improves performance.
+
+* Cisco UCS nodes
+	* 32-core, 64-bit CPU (Intel(R) Xeon(R) CPU E5-2630 v3 @ 2.40GHz)
+	* 256 GB RAM
+	* 2 x 10G bonded NICs
+	* 4 x 6TB 7200 RPM disks
+
+* Storm Supervisors are isolated and running on a dedicated set of 3 nodes.
+
+* Kafka Brokers are isolated and running on a separate, dedicated set of 3 nodes.
+
+#### Results
+
+* These benchmarks executed all 3 enrichments simultaneously: the [Geo IP Enrichment](#geo-ip-enrichment), the [Stellar Enrichment](#stellar-enrichment), and the [HBase Enrichment](#hbase-enrichment).
+
+* The data used to drive the benchmark includes 10,000 unique IP addresses.  The telemetry was populated with IP addresses such that 10% of these IPs were chosen 80% of the time.  This bias was designed to mimic the typical distribution seen in real-world telemetry.
+
+* The Unified Enrichment topology was able to sustain 308,000 events per second on a small, dedicated 3 node cluster.
+
+* The values used to achieve these results with the Unified Enrichment topology follow.  You should not attempt to use these parameters in your topology directly.  These are specific to the environment and workload and should only be used as a guideline.
+	```
+	enrichment.workers=9
+	enrichment.acker.executors=9
+	enrichment.join.cache.size=100000
+	threat.intel.join.cache.size=100000
+	kafka.spout.parallelism=27
+	enrichment.join.parallelism=54
+	threat.intel.join.parallelism=9
+	kafka.writer.parallelism=27
+	topology.worker.childopts=-XX:+UseG1GC -Xms8g -Xmx8g -XX:MaxGCPauseMillis=100
+	topology.max.spout.pending=3000
+	metron.threadpool.size=1
+	metron.threadpool.type=FIXED
+	```


[40/50] [abbrv] metron git commit: METRON-1499 Enable Configuration of Unified Enrichment Topology via Ambari (nickwallen) closes apache/metron#984

Posted by rm...@apache.org.
METRON-1499 Enable Configuration of Unified Enrichment Topology via Ambari (nickwallen) closes apache/metron#984


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/82212ba8
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/82212ba8
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/82212ba8

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 82212ba818a0ef3b92505e8d144487c69a8d4a44
Parents: 3fcbf8b
Author: nickwallen <ni...@nickallen.org>
Authored: Tue Apr 17 09:43:16 2018 -0400
Committer: nickallen <ni...@apache.org>
Committed: Tue Apr 17 09:43:16 2018 -0400

----------------------------------------------------------------------
 .../packaging/ambari/metron-mpack/pom.xml       |   7 +-
 .../configuration/metron-enrichment-env.xml     | 130 +++-
 .../package/scripts/enrichment_commands.py      |  20 +-
 .../package/scripts/enrichment_master.py        |  12 +-
 .../package/scripts/params/params_linux.py      |  19 +-
 .../enrichment-splitjoin.properties.j2          |  63 ++
 .../templates/enrichment-unified.properties.j2  |  60 ++
 .../METRON/CURRENT/themes/metron_theme.json     | 151 ++++-
 .../docker/rpm-docker/SPECS/metron.spec         |   5 +-
 .../main/config/enrichment-splitjoin.properties |  63 ++
 .../config/enrichment-splitjoin.properties.j2   |  63 ++
 .../main/config/enrichment-unified.properties   |  69 +++
 .../config/enrichment-unified.properties.j2     |  60 ++
 .../src/main/config/enrichment.properties       |  64 --
 .../src/main/config/enrichment.properties.j2    |  63 --
 .../main/flux/enrichment/remote-splitjoin.yaml  | 590 +++++++++++++++++++
 .../main/flux/enrichment/remote-unified.yaml    |  71 ++-
 .../src/main/flux/enrichment/remote.yaml        | 590 -------------------
 .../main/scripts/start_enrichment_topology.sh   |  16 +-
 .../integration/EnrichmentIntegrationTest.java  |  61 +-
 .../UnifiedEnrichmentIntegrationTest.java       |  71 +++
 21 files changed, 1433 insertions(+), 815 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-deployment/packaging/ambari/metron-mpack/pom.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/pom.xml b/metron-deployment/packaging/ambari/metron-mpack/pom.xml
index 491e8dd..1843eb7 100644
--- a/metron-deployment/packaging/ambari/metron-mpack/pom.xml
+++ b/metron-deployment/packaging/ambari/metron-mpack/pom.xml
@@ -110,7 +110,8 @@
                                 <resource>
                                     <directory>${basedir}/../../../../metron-platform/metron-enrichment/src/main/config</directory>
                                     <includes>
-                                        <include>enrichment.properties.j2</include>
+                                        <include>enrichment-splitjoin.properties.j2</include>
+                                        <include>enrichment-unified.properties.j2</include>
                                     </includes>
                                     <filtering>false</filtering>
                                 </resource>
@@ -171,8 +172,10 @@
                         <fileset>
                             <directory>${basedir}/src/main/resources/common-services/METRON/CURRENT/package/templates</directory>
                             <includes>
-                                <include>enrichment.properties.j2</include>
+                                <include>enrichment-unified.properties.j2</include>
+                                <include>enrichment-splitjoin.properties.j2</include>
                                 <include>elasticsearch.properties.j2</include>
+                                <include>hdfs.properties.j2</include>
                             </includes>
                         </fileset>
                     </filesets>

http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-enrichment-env.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-enrichment-env.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-enrichment-env.xml
index 9737660..81b135c 100644
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-enrichment-env.xml
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-enrichment-env.xml
@@ -18,6 +18,10 @@
   limitations under the License.
 -->
 <configuration supports_final="true">
+
+  <!--
+    enrichment adapter parameters
+  -->
   <property>
     <name>geoip_url</name>
     <value>http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.mmdb.gz</value>
@@ -33,6 +37,10 @@
       <type>content</type>
     </value-attributes>
   </property>
+
+  <!--
+    kafka parameters
+  -->
   <property>
     <name>enrichment_kafka_start</name>
     <description>Enrichment Topology Spout Offset</description>
@@ -81,6 +89,10 @@
     <value>indexing</value>
     <display-name>Threat Intel Error Topic</display-name>
   </property>
+
+  <!--
+    hbase parameters
+  -->
   <property>
     <name>enrichment_hbase_table</name>
     <value>enrichment</value>
@@ -105,6 +117,10 @@
     <description>The HBase column family which will hold threatintel data in HBase.</description>
     <display-name>HBase Table Column Family</display-name>
   </property>
+
+  <!--
+    storm common parameters
+  -->
   <property>
     <name>enrichment_workers</name>
     <description>Number of Workers for the Enrichment Topology</description>
@@ -129,70 +145,156 @@
   <property>
     <name>enrichment_topology_max_spout_pending</name>
     <description>Spout Max Pending Tuples for the Enrichment Topology</description>
-    <value/>
-    <display-name>Enrichment Max Pending</display-name>
+    <value>500</value>
+    <display-name>Enrichment Max Spout Pending</display-name>
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
   </property>
   <property>
+    <name>enrichment_topology</name>
+    <description>Which Enrichment topology to execute</description>
+    <value>Split-Join</value>
+    <display-name>Enrichment Topology</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>Split-Join</value>
+        </entry>
+        <entry>
+          <value>Unified</value>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <!--
+    split-join topology parameters
+  -->
+  <property>
     <name>enrichment_join_cache_size</name>
-    <description>Enrichment Join Bolt Cache Max Size</description>
+    <description>Enrichment join bolt max cache size for the Split Join Enrichment Topology</description>
     <value>100000</value>
-    <display-name>Enrichment Join Size</display-name>
+    <display-name>Enrichment Join Max Cache Size</display-name>
   </property>
   <property>
     <name>threatintel_join_cache_size</name>
-    <description>Threat Intel Join Bolt Cache Max Size</description>
+    <description>Threat Intel join bolt max cache size for the Split Join Enrichment Topology</description>
     <value>100000</value>
-    <display-name>Threat Intel Join Size</display-name>
+    <display-name>Threat Intel Join Max Cache Size</display-name>
   </property>
   <property>
     <name>enrichment_kafka_spout_parallelism</name>
-    <description>Kafka Spout Parallelism for the Enrichment Topology</description>
+    <description>Kafka spout parallelism for the Split Join Enrichment Topology</description>
     <value>1</value>
     <display-name>Enrichment Spout Parallelism</display-name>
   </property>
   <property>
     <name>enrichment_split_parallelism</name>
-    <description>Enrichment Split Bolt Parallelism for the Enrichment Topology</description>
+    <description>Enrichment split bolt parallelism for the Split Join Enrichment Topology</description>
     <value>1</value>
     <display-name>Enrichment Split Parallelism</display-name>
   </property>
   <property>
     <name>enrichment_stellar_parallelism</name>
-    <description>Stellar Enrichment Bolt Parallelism for the Enrichment Topology</description>
+    <description>Stellar enrichment bolt parallelism for the Split Join Enrichment Topology</description>
     <value>1</value>
     <display-name>Stellar Enrichment Parallelism</display-name>
   </property>
   <property>
     <name>enrichment_join_parallelism</name>
-    <description>Enrichment Join Bolt Parallelism for the Enrichment Topology</description>
+    <description>Enrichment join bolt parallelism for the Split Join Enrichment Topology</description>
     <value>1</value>
     <display-name>Enrichment Join Parallelism</display-name>
   </property>
   <property>
     <name>threat_intel_split_parallelism</name>
-    <description>Threat Intel Split Bolt Parallelism for the Enrichment Topology</description>
+    <description>Threat Intel split bolt parallelism for the Split Join Enrichment Topology</description>
     <value>1</value>
     <display-name>Threat Intel Split Parallelism</display-name>
   </property>
   <property>
     <name>threat_intel_stellar_parallelism</name>
-    <description>Threat Intel Stellar Bolt Parallelism for the Enrichment Topology</description>
+    <description>Threat Intel stellar bolt parallelism for the Split Join Enrichment Topology</description>
     <value>1</value>
     <display-name>Threat Intel Stellar Parallelism</display-name>
   </property>
   <property>
     <name>threat_intel_join_parallelism</name>
-    <description>Threat Intel Join Bolt Parallelism for the Enrichment Topology</description>
+    <description>Threat Intel join bolt parallelism for the Split Join Enrichment Topology</description>
     <value>1</value>
     <display-name>Threat Intel Join Parallelism</display-name>
   </property>
   <property>
     <name>kafka_writer_parallelism</name>
-    <description>Kafka Writer Parallelism for the Enrichment Topology</description>
+    <description>Kafka writer parallelism for the Split Join Enrichment Topology</description>
     <value>1</value>
     <display-name>Enrichment Kafka Writer Parallelism</display-name>
   </property>
+
+  <!--
+    unified topology parameters
+  -->
+  <property>
+    <name>unified_kafka_spout_parallelism</name>
+    <description>Kafka spout parallelism for the Unified Enrichment Topology</description>
+    <value>1</value>
+    <display-name>Unified Enrichment Spout Parallelism</display-name>
+  </property>
+  <property>
+    <name>unified_enrichment_parallelism</name>
+    <description>Enrichment parallelism for the Unified Enrichment Topology</description>
+    <value>1</value>
+    <display-name>Unified Enrichment Parallelism</display-name>
+  </property>
+  <property>
+    <name>unified_threat_intel_parallelism</name>
+    <description>Threat Intel parallelism for the Unified Enrichment Topology</description>
+    <value>1</value>
+    <display-name>Unified Threat Intel Parallelism</display-name>
+  </property>
+  <property>
+    <name>unified_kafka_writer_parallelism</name>
+    <description>Kafka writer parallelism for the Unified Enrichment Topology</description>
+    <value>1</value>
+    <display-name>Unified Kafka Writer Parallelism</display-name>
+  </property>
+  <property>
+    <name>unified_enrichment_cache_size</name>
+    <description>Enrichment max cache size for the Unified Enrichment Topology</description>
+    <value>100000</value>
+    <display-name>Unified Enrichment Cache Size</display-name>
+  </property>
+  <property>
+    <name>unified_threat_intel_cache_size</name>
+    <description>Threat Intel Max Cache Size for the Unified Enrichment Topology</description>
+    <value>100000</value>
+    <display-name>Unified Threat Intel Cache Size</display-name>
+  </property>
+  <property>
+    <name>unified_enrichment_threadpool_size</name>
+    <description>Enrichment thread pool size for the Unified Enrichment Topology</description>
+    <value>1</value>
+    <display-name>Unified Enrichment Thread Pool Size</display-name>
+  </property>
+  <property>
+    <name>unified_enrichment_threadpool_type</name>
+    <description>Enrichment thread pool type for the Unified Enrichment Topology</description>
+    <display-name>Unified Enrichment Thread Pool Type</display-name>
+    <value>FIXED</value>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>FIXED</value>
+        </entry>
+        <entry>
+          <value>WORK_STEALING</value>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/enrichment_commands.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/enrichment_commands.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/enrichment_commands.py
index f9ec547..a1bdbed 100755
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/enrichment_commands.py
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/enrichment_commands.py
@@ -129,13 +129,21 @@ class EnrichmentCommands:
         Logger.info("Starting Metron enrichment topology: {0}".format(self.__enrichment_topology))
 
         if not self.is_topology_active(env):
-            start_cmd_template = """{0}/bin/start_enrichment_topology.sh \
-                                        -s {1} \
-                                        -z {2}"""
+
+            # which enrichment topology needs started?
+            if self.__params.enrichment_topology == "Unified":
+                topology_flux = "{0}/flux/enrichment/remote-unified.yaml".format(self.__params.metron_home)
+                topology_props = "{0}/config/enrichment-unified.properties".format(self.__params.metron_home)
+            elif self.__params.enrichment_topology == "Split-Join":
+                topology_flux = "{0}/flux/enrichment/remote-splitjoin.yaml".format(self.__params.metron_home)
+                topology_props = "{0}/config/enrichment-splitjoin.properties".format(self.__params.metron_home)
+            else:
+                raise Fail("Unexpected enrichment topology; name=" + self.__params.enrichment_topology)
+
+            # start the topology
+            start_cmd_template = """{0}/bin/start_enrichment_topology.sh --remote {1} --filter {2}"""
             Logger.info('Starting ' + self.__enrichment_topology)
-            start_cmd = start_cmd_template.format(self.__params.metron_home,
-                                                  self.__enrichment_topology,
-                                                  self.__params.zookeeper_quorum)
+            start_cmd = start_cmd_template.format(self.__params.metron_home, topology_flux, topology_props)
             Execute(start_cmd, user=self.__params.metron_user, tries=3, try_sleep=5, logoutput=True)
         else:
             Logger.info('Enrichment topology already running')

http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/enrichment_master.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/enrichment_master.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/enrichment_master.py
index 24feb81..cada1d2 100755
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/enrichment_master.py
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/enrichment_master.py
@@ -38,11 +38,15 @@ class Enrichment(Script):
         env.set_params(params)
 
         Logger.info("Running enrichment configure")
-        File(format("{metron_config_path}/enrichment.properties"),
-             content=Template("enrichment.properties.j2"),
+        File(format("{metron_config_path}/enrichment-splitjoin.properties"),
+             content=Template("enrichment-splitjoin.properties.j2"),
              owner=params.metron_user,
-             group=params.metron_group
-             )
+             group=params.metron_group)
+
+        File(format("{metron_config_path}/enrichment-unified.properties"),
+            content=Template("enrichment-unified.properties.j2"),
+            owner=params.metron_user,
+            group=params.metron_group)
 
         if not metron_service.is_zk_configured(params):
           metron_service.init_zk_config(params)

http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/params_linux.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/params_linux.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/params_linux.py
index ccce022..f44d05f 100755
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/params_linux.py
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/params_linux.py
@@ -256,20 +256,27 @@ user_settings_hbase_table = status_params.user_settings_hbase_table
 user_settings_hbase_cf = status_params.user_settings_hbase_cf
 
 # Enrichment
+metron_enrichment_topology = status_params.metron_enrichment_topology
 geoip_url = config['configurations']['metron-enrichment-env']['geoip_url']
 enrichment_host_known_hosts = config['configurations']['metron-enrichment-env']['enrichment_host_known_hosts']
+
+# Enrichment - Kafka
 enrichment_kafka_start = config['configurations']['metron-enrichment-env']['enrichment_kafka_start']
 enrichment_input_topic = status_params.enrichment_input_topic
 enrichment_output_topic = config['configurations']['metron-enrichment-env']['enrichment_output_topic']
 enrichment_error_topic = config['configurations']['metron-enrichment-env']['enrichment_error_topic']
 threatintel_error_topic = config['configurations']['metron-enrichment-env']['threatintel_error_topic']
-metron_enrichment_topology = status_params.metron_enrichment_topology
+
+# Enrichment - Storm common parameters
 enrichment_workers = config['configurations']['metron-enrichment-env']['enrichment_workers']
 enrichment_acker_executors = config['configurations']['metron-enrichment-env']['enrichment_acker_executors']
 if not len(enrichment_topology_worker_childopts) == 0:
     enrichment_topology_worker_childopts += ' '
 enrichment_topology_worker_childopts += config['configurations']['metron-enrichment-env']['enrichment_topology_worker_childopts']
 enrichment_topology_max_spout_pending = config['configurations']['metron-enrichment-env']['enrichment_topology_max_spout_pending']
+enrichment_topology = config['configurations']['metron-enrichment-env']['enrichment_topology']
+
+# Enrichment - Split Join topology
 enrichment_join_cache_size = config['configurations']['metron-enrichment-env']['enrichment_join_cache_size']
 threatintel_join_cache_size = config['configurations']['metron-enrichment-env']['threatintel_join_cache_size']
 enrichment_kafka_spout_parallelism = config['configurations']['metron-enrichment-env']['enrichment_kafka_spout_parallelism']
@@ -281,6 +288,16 @@ threat_intel_stellar_parallelism = config['configurations']['metron-enrichment-e
 threat_intel_join_parallelism = config['configurations']['metron-enrichment-env']['threat_intel_join_parallelism']
 kafka_writer_parallelism = config['configurations']['metron-enrichment-env']['kafka_writer_parallelism']
 
+# Enrichment - Unified topology
+unified_kafka_spout_parallelism = config['configurations']['metron-enrichment-env']['unified_kafka_spout_parallelism']
+unified_enrichment_parallelism = config['configurations']['metron-enrichment-env']['unified_enrichment_parallelism']
+unified_threat_intel_parallelism = config['configurations']['metron-enrichment-env']['unified_threat_intel_parallelism']
+unified_kafka_writer_parallelism = config['configurations']['metron-enrichment-env']['unified_kafka_writer_parallelism']
+unified_enrichment_cache_size = config['configurations']['metron-enrichment-env']['unified_enrichment_cache_size']
+unified_threat_intel_cache_size = config['configurations']['metron-enrichment-env']['unified_threat_intel_cache_size']
+unified_enrichment_threadpool_size = config['configurations']['metron-enrichment-env']['unified_enrichment_threadpool_size']
+unified_enrichment_threadpool_type = config['configurations']['metron-enrichment-env']['unified_enrichment_threadpool_type']
+
 # Profiler
 metron_profiler_topology = 'profiler'
 profiler_input_topic = config['configurations']['metron-enrichment-env']['enrichment_output_topic']

http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/templates/enrichment-splitjoin.properties.j2
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/templates/enrichment-splitjoin.properties.j2 b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/templates/enrichment-splitjoin.properties.j2
new file mode 100644
index 0000000..a0b21c9
--- /dev/null
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/templates/enrichment-splitjoin.properties.j2
@@ -0,0 +1,63 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+##### Storm #####
+enrichment.workers={{enrichment_workers}}
+enrichment.acker.executors={{enrichment_acker_executors}}
+topology.worker.childopts={{enrichment_topology_worker_childopts}}
+topology.auto-credentials={{topology_auto_credentials}}
+topology.max.spout.pending={{enrichment_topology_max_spout_pending}}
+
+##### Kafka #####
+kafka.zk={{zookeeper_quorum}}
+kafka.broker={{kafka_brokers}}
+kafka.security.protocol={{kafka_security_protocol}}
+
+# One of EARLIEST, LATEST, UNCOMMITTED_EARLIEST, UNCOMMITTED_LATEST
+kafka.start={{enrichment_kafka_start}}
+
+enrichment.input.topic={{enrichment_input_topic}}
+enrichment.output.topic={{enrichment_output_topic}}
+enrichment.error.topic={{enrichment_error_topic}}
+threat.intel.error.topic={{threatintel_error_topic}}
+
+##### JoinBolt #####
+enrichment.join.cache.size={{enrichment_join_cache_size}}
+threat.intel.join.cache.size={{threatintel_join_cache_size}}
+
+##### Enrichment #####
+hbase.provider.impl={{enrichment_hbase_provider_impl}}
+enrichment.simple.hbase.table={{enrichment_hbase_table}}
+enrichment.simple.hbase.cf={{enrichment_hbase_cf}}
+enrichment.host.known_hosts={{enrichment_host_known_hosts}}
+
+##### Threat Intel #####
+threat.intel.tracker.table={{threatintel_hbase_table}}
+threat.intel.tracker.cf={{threatintel_hbase_cf}}
+threat.intel.simple.hbase.table={{threatintel_hbase_table}}
+threat.intel.simple.hbase.cf={{threatintel_hbase_cf}}
+
+##### Parallelism #####
+kafka.spout.parallelism={{enrichment_kafka_spout_parallelism}}
+enrichment.split.parallelism={{enrichment_split_parallelism}}
+enrichment.stellar.parallelism={{enrichment_stellar_parallelism}}
+enrichment.join.parallelism={{enrichment_join_parallelism}}
+threat.intel.split.parallelism={{threat_intel_split_parallelism}}
+threat.intel.stellar.parallelism={{threat_intel_stellar_parallelism}}
+threat.intel.join.parallelism={{threat_intel_join_parallelism}}
+kafka.writer.parallelism={{kafka_writer_parallelism}}

http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/templates/enrichment-unified.properties.j2
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/templates/enrichment-unified.properties.j2 b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/templates/enrichment-unified.properties.j2
new file mode 100644
index 0000000..8c28c49
--- /dev/null
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/templates/enrichment-unified.properties.j2
@@ -0,0 +1,60 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+##### Storm #####
+enrichment.workers={{enrichment_workers}}
+enrichment.acker.executors={{enrichment_acker_executors}}
+topology.worker.childopts={{enrichment_topology_worker_childopts}}
+topology.auto-credentials={{topology_auto_credentials}}
+topology.max.spout.pending={{enrichment_topology_max_spout_pending}}
+
+##### Kafka #####
+kafka.zk={{zookeeper_quorum}}
+kafka.broker={{kafka_brokers}}
+kafka.security.protocol={{kafka_security_protocol}}
+kafka.start={{enrichment_kafka_start}}
+enrichment.input.topic={{enrichment_input_topic}}
+enrichment.output.topic={{enrichment_output_topic}}
+enrichment.error.topic={{enrichment_error_topic}}
+threat.intel.error.topic={{threatintel_error_topic}}
+
+##### Enrichment #####
+hbase.provider.impl={{enrichment_hbase_provider_impl}}
+enrichment.simple.hbase.table={{enrichment_hbase_table}}
+enrichment.simple.hbase.cf={{enrichment_hbase_cf}}
+enrichment.host.known_hosts={{enrichment_host_known_hosts}}
+
+##### Threat Intel #####
+threat.intel.tracker.table={{threatintel_hbase_table}}
+threat.intel.tracker.cf={{threatintel_hbase_cf}}
+threat.intel.simple.hbase.table={{threatintel_hbase_table}}
+threat.intel.simple.hbase.cf={{threatintel_hbase_cf}}
+
+##### Parallelism #####
+kafka.spout.parallelism={{unified_kafka_spout_parallelism}}
+enrichment.parallelism={{unified_enrichment_parallelism}}
+threat.intel.parallelism={{unified_threat_intel_parallelism}}
+kafka.writer.parallelism={{unified_kafka_writer_parallelism}}
+
+##### Caches #####
+enrichment.cache.size={{unified_enrichment_cache_size}}
+threat.intel.cache.size={{unified_threat_intel_cache_size}}
+
+##### Threads #####
+enrichment.threadpool.size={{unified_enrichment_threadpool_size}}
+enrichment.threadpool.type={{unified_enrichment_threadpool_type}}

http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/themes/metron_theme.json
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/themes/metron_theme.json b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/themes/metron_theme.json
index 364b3ef..06bc155 100644
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/themes/metron_theme.json
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/themes/metron_theme.json
@@ -65,7 +65,7 @@
             "display-name": "Enrichment",
             "layout": {
               "tab-columns": "1",
-              "tab-rows": "3",
+              "tab-rows": "5",
               "sections": [
                 {
                   "name": "section-enrichment-adapters",
@@ -123,6 +123,44 @@
                       "column-span": "1"
                     }
                   ]
+                },
+                {
+                  "name": "section-enrichment-splitjoin",
+                  "row-index": "3",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-enrichment-splitjoin",
+                      "display-name": "Split Join Topology",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-enrichment-unified",
+                  "row-index": "4",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-enrichment-unified",
+                      "display-name": "Unified Topology",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
                 }
               ]
             }
@@ -462,44 +500,80 @@
           "subsection-name": "subsection-enrichment-storm"
         },
         {
-          "config": "metron-enrichment-env/enrichment_join_cache_size",
+          "config": "metron-enrichment-env/enrichment_topology",
           "subsection-name": "subsection-enrichment-storm"
         },
         {
+          "config": "metron-enrichment-env/enrichment_join_cache_size",
+          "subsection-name": "subsection-enrichment-splitjoin"
+        },
+        {
           "config": "metron-enrichment-env/threatintel_join_cache_size",
-          "subsection-name": "subsection-enrichment-storm"
+          "subsection-name": "subsection-enrichment-splitjoin"
         },
         {
           "config": "metron-enrichment-env/enrichment_kafka_spout_parallelism",
-          "subsection-name": "subsection-enrichment-storm"
+          "subsection-name": "subsection-enrichment-splitjoin"
         },
         {
           "config": "metron-enrichment-env/enrichment_split_parallelism",
-          "subsection-name": "subsection-enrichment-storm"
+          "subsection-name": "subsection-enrichment-splitjoin"
         },
         {
           "config": "metron-enrichment-env/enrichment_stellar_parallelism",
-          "subsection-name": "subsection-enrichment-storm"
+          "subsection-name": "subsection-enrichment-splitjoin"
         },
         {
           "config": "metron-enrichment-env/enrichment_join_parallelism",
-          "subsection-name": "subsection-enrichment-storm"
+          "subsection-name": "subsection-enrichment-splitjoin"
         },
         {
           "config": "metron-enrichment-env/threat_intel_split_parallelism",
-          "subsection-name": "subsection-enrichment-storm"
+          "subsection-name": "subsection-enrichment-splitjoin"
         },
         {
           "config": "metron-enrichment-env/threat_intel_stellar_parallelism",
-          "subsection-name": "subsection-enrichment-storm"
+          "subsection-name": "subsection-enrichment-splitjoin"
         },
         {
           "config": "metron-enrichment-env/threat_intel_join_parallelism",
-          "subsection-name": "subsection-enrichment-storm"
+          "subsection-name": "subsection-enrichment-splitjoin"
         },
         {
           "config": "metron-enrichment-env/kafka_writer_parallelism",
-          "subsection-name": "subsection-enrichment-storm"
+          "subsection-name": "subsection-enrichment-splitjoin"
+        },
+        {
+          "config": "metron-enrichment-env/unified_kafka_spout_parallelism",
+          "subsection-name": "subsection-enrichment-unified"
+        },
+        {
+          "config": "metron-enrichment-env/unified_enrichment_parallelism",
+          "subsection-name": "subsection-enrichment-unified"
+        },
+        {
+          "config": "metron-enrichment-env/unified_threat_intel_parallelism",
+          "subsection-name": "subsection-enrichment-unified"
+        },
+        {
+          "config": "metron-enrichment-env/unified_kafka_writer_parallelism",
+          "subsection-name": "subsection-enrichment-unified"
+        },
+        {
+          "config": "metron-enrichment-env/unified_enrichment_cache_size",
+          "subsection-name": "subsection-enrichment-unified"
+        },
+        {
+          "config": "metron-enrichment-env/unified_threat_intel_cache_size",
+          "subsection-name": "subsection-enrichment-unified"
+        },
+        {
+          "config": "metron-enrichment-env/unified_enrichment_threadpool_size",
+          "subsection-name": "subsection-enrichment-unified"
+        },
+        {
+          "config": "metron-enrichment-env/unified_enrichment_threadpool_type",
+          "subsection-name": "subsection-enrichment-unified"
         },
         {
           "config": "metron-indexing-env/ra_indexing_kafka_start",
@@ -838,6 +912,12 @@
         }
       },
       {
+        "config": "metron-enrichment-env/enrichment_topology",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
         "config": "metron-enrichment-env/enrichment_join_cache_size",
         "widget": {
           "type": "text-field"
@@ -897,7 +977,54 @@
           "type": "text-field"
         }
       },
-
+      {
+        "config": "metron-enrichment-env/unified_kafka_spout_parallelism",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "metron-enrichment-env/unified_enrichment_parallelism",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "metron-enrichment-env/unified_threat_intel_parallelism",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "metron-enrichment-env/unified_kafka_writer_parallelism",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "metron-enrichment-env/unified_enrichment_cache_size",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "metron-enrichment-env/unified_threat_intel_cache_size",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "metron-enrichment-env/unified_enrichment_threadpool_size",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "metron-enrichment-env/unified_enrichment_threadpool_type",
+        "widget": {
+          "type": "combo"
+        }
+      },
       {
         "config": "metron-indexing-env/batch_indexing_kafka_start",
         "widget": {

http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec b/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec
index 6b35dae..1f40105 100644
--- a/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec
+++ b/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec
@@ -273,13 +273,14 @@ This package installs the Metron Enrichment files
 %dir %{metron_home}/flux/enrichment
 %{metron_home}/bin/latency_summarizer.sh
 %{metron_home}/bin/start_enrichment_topology.sh
-%{metron_home}/config/enrichment.properties
+%{metron_home}/config/enrichment-splitjoin.properties
+%{metron_home}/config/enrichment-unified.properties
 %{metron_home}/config/zookeeper/enrichments/bro.json
 %{metron_home}/config/zookeeper/enrichments/snort.json
 %{metron_home}/config/zookeeper/enrichments/websphere.json
 %{metron_home}/config/zookeeper/enrichments/yaf.json
 %{metron_home}/config/zookeeper/enrichments/asa.json
-%{metron_home}/flux/enrichment/remote.yaml
+%{metron_home}/flux/enrichment/remote-splitjoin.yaml
 %{metron_home}/flux/enrichment/remote-unified.yaml
 %attr(0644,root,root) %{metron_home}/lib/metron-enrichment-%{full_version}-uber.jar
 

http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-platform/metron-enrichment/src/main/config/enrichment-splitjoin.properties
----------------------------------------------------------------------
diff --git a/metron-platform/metron-enrichment/src/main/config/enrichment-splitjoin.properties b/metron-platform/metron-enrichment/src/main/config/enrichment-splitjoin.properties
new file mode 100644
index 0000000..109c2ee
--- /dev/null
+++ b/metron-platform/metron-enrichment/src/main/config/enrichment-splitjoin.properties
@@ -0,0 +1,63 @@
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+##### Storm #####
+enrichment.workers=1
+enrichment.acker.executors=0
+topology.worker.childopts=
+topology.auto-credentials=
+topology.max.spout.pending=500
+
+##### Kafka #####
+kafka.zk=node1:2181
+kafka.broker=node1:6667
+kafka.security.protocol=PLAINTEXT
+
+# One of EARLIEST, LATEST, UNCOMMITTED_EARLIEST, UNCOMMITTED_LATEST
+kafka.start=UNCOMMITTED_EARLIEST
+
+enrichment.input.topic=enrichments
+enrichment.output.topic=indexing
+enrichment.error.topic=indexing
+threat.intel.error.topic=indexing
+
+##### JoinBolt #####
+enrichment.join.cache.size=100000
+threat.intel.join.cache.size=100000
+
+##### Enrichment #####
+hbase.provider.impl=org.apache.metron.hbase.HTableProvider
+enrichment.simple.hbase.table=enrichment
+enrichment.simple.hbase.cf=t
+enrichment.host.known_hosts=[{"ip":"10.1.128.236", "local":"YES", "type":"webserver", "asset_value" : "important"},\
+{"ip":"10.1.128.237", "local":"UNKNOWN", "type":"unknown", "asset_value" : "important"},\
+{"ip":"10.60.10.254", "local":"YES", "type":"printer", "asset_value" : "important"}]
+
+##### Threat Intel #####
+threat.intel.tracker.table=access_tracker
+threat.intel.tracker.cf=t
+threat.intel.simple.hbase.table=threatintel
+threat.intel.simple.hbase.cf=t
+
+##### Parallelism #####
+kafka.spout.parallelism=1
+enrichment.split.parallelism=1
+enrichment.stellar.parallelism=1
+enrichment.join.parallelism=1
+threat.intel.split.parallelism=1
+threat.intel.stellar.parallelism=1
+threat.intel.join.parallelism=1
+kafka.writer.parallelism=1

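The kafka.start comment above lists the accepted values verbatim; they match the first-poll offset strategies of storm-kafka-client, and the flux files later in this diff hand the raw string to setFirstPollOffsetStrategy. A minimal sketch of that lookup, assuming the Storm 1.x enum org.apache.storm.kafka.spout.KafkaSpoutConfig.FirstPollOffsetStrategy; the helper itself is hypothetical, not code from this commit:

    import org.apache.storm.kafka.spout.KafkaSpoutConfig.FirstPollOffsetStrategy;

    public class OffsetStrategyResolver {
      // Maps the kafka.start property to the spout's enum, failing fast on a
      // typo instead of silently falling back to a default.
      public static FirstPollOffsetStrategy resolve(String kafkaStart) {
        try {
          return FirstPollOffsetStrategy.valueOf(kafkaStart.trim().toUpperCase());
        } catch (IllegalArgumentException e) {
          throw new IllegalArgumentException("kafka.start must be one of "
              + java.util.Arrays.toString(FirstPollOffsetStrategy.values())
              + ", got: " + kafkaStart, e);
        }
      }
    }
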
http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-platform/metron-enrichment/src/main/config/enrichment-splitjoin.properties.j2
----------------------------------------------------------------------
diff --git a/metron-platform/metron-enrichment/src/main/config/enrichment-splitjoin.properties.j2 b/metron-platform/metron-enrichment/src/main/config/enrichment-splitjoin.properties.j2
new file mode 100755
index 0000000..a0b21c9
--- /dev/null
+++ b/metron-platform/metron-enrichment/src/main/config/enrichment-splitjoin.properties.j2
@@ -0,0 +1,63 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+##### Storm #####
+enrichment.workers={{enrichment_workers}}
+enrichment.acker.executors={{enrichment_acker_executors}}
+topology.worker.childopts={{enrichment_topology_worker_childopts}}
+topology.auto-credentials={{topology_auto_credentials}}
+topology.max.spout.pending={{enrichment_topology_max_spout_pending}}
+
+##### Kafka #####
+kafka.zk={{zookeeper_quorum}}
+kafka.broker={{kafka_brokers}}
+kafka.security.protocol={{kafka_security_protocol}}
+
+# One of EARLIEST, LATEST, UNCOMMITTED_EARLIEST, UNCOMMITTED_LATEST
+kafka.start={{enrichment_kafka_start}}
+
+enrichment.input.topic={{enrichment_input_topic}}
+enrichment.output.topic={{enrichment_output_topic}}
+enrichment.error.topic={{enrichment_error_topic}}
+threat.intel.error.topic={{threatintel_error_topic}}
+
+##### JoinBolt #####
+enrichment.join.cache.size={{enrichment_join_cache_size}}
+threat.intel.join.cache.size={{threatintel_join_cache_size}}
+
+##### Enrichment #####
+hbase.provider.impl={{enrichment_hbase_provider_impl}}
+enrichment.simple.hbase.table={{enrichment_hbase_table}}
+enrichment.simple.hbase.cf={{enrichment_hbase_cf}}
+enrichment.host.known_hosts={{enrichment_host_known_hosts}}
+
+##### Threat Intel #####
+threat.intel.tracker.table={{threatintel_hbase_table}}
+threat.intel.tracker.cf={{threatintel_hbase_cf}}
+threat.intel.simple.hbase.table={{threatintel_hbase_table}}
+threat.intel.simple.hbase.cf={{threatintel_hbase_cf}}
+
+##### Parallelism #####
+kafka.spout.parallelism={{enrichment_kafka_spout_parallelism}}
+enrichment.split.parallelism={{enrichment_split_parallelism}}
+enrichment.stellar.parallelism={{enrichment_stellar_parallelism}}
+enrichment.join.parallelism={{enrichment_join_parallelism}}
+threat.intel.split.parallelism={{threat_intel_split_parallelism}}
+threat.intel.stellar.parallelism={{threat_intel_stellar_parallelism}}
+threat.intel.join.parallelism={{threat_intel_join_parallelism}}
+kafka.writer.parallelism={{kafka_writer_parallelism}}

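Each {{name}} token in this template is filled in from metron-enrichment-env when Ambari renders the file with Jinja2. The sketch below only illustrates the substitution semantics with a hypothetical Java helper; it is not Ambari's mechanism:

    import java.util.Map;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class TemplateSketch {
      // Replaces {{name}} tokens with values from a config map, leaving an
      // empty string where a name is unknown.
      private static final Pattern TOKEN = Pattern.compile("\\{\\{(\\w+)\\}\\}");

      public static String render(String template, Map<String, String> env) {
        Matcher m = TOKEN.matcher(template);
        StringBuffer out = new StringBuffer();
        while (m.find()) {
          m.appendReplacement(out,
              Matcher.quoteReplacement(env.getOrDefault(m.group(1), "")));
        }
        m.appendTail(out);
        return out.toString();
      }

      public static void main(String[] args) {
        // Prints: kafka.zk=node1:2181
        System.out.println(render("kafka.zk={{zookeeper_quorum}}",
            java.util.Collections.singletonMap("zookeeper_quorum", "node1:2181")));
      }
    }
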
http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-platform/metron-enrichment/src/main/config/enrichment-unified.properties
----------------------------------------------------------------------
diff --git a/metron-platform/metron-enrichment/src/main/config/enrichment-unified.properties b/metron-platform/metron-enrichment/src/main/config/enrichment-unified.properties
new file mode 100644
index 0000000..5338ead
--- /dev/null
+++ b/metron-platform/metron-enrichment/src/main/config/enrichment-unified.properties
@@ -0,0 +1,69 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+##### Storm #####
+enrichment.workers=1
+enrichment.acker.executors=0
+topology.worker.childopts=
+topology.auto-credentials=
+topology.max.spout.pending=500
+
+##### Kafka #####
+kafka.zk=node1:2181
+kafka.broker=node1:6667
+kafka.security.protocol=PLAINTEXT
+
+# One of EARLIEST, LATEST, UNCOMMITTED_EARLIEST, UNCOMMITTED_LATEST
+kafka.start=UNCOMMITTED_EARLIEST
+
+enrichment.input.topic=enrichments
+enrichment.output.topic=indexing
+enrichment.error.topic=indexing
+threat.intel.error.topic=indexing
+
+##### JoinBolt #####
+enrichment.join.cache.size=100000
+threat.intel.join.cache.size=100000
+
+##### Enrichment #####
+hbase.provider.impl=org.apache.metron.hbase.HTableProvider
+enrichment.simple.hbase.table=enrichment
+enrichment.simple.hbase.cf=t
+enrichment.host.known_hosts=[{"ip":"10.1.128.236", "local":"YES", "type":"webserver", "asset_value" : "important"},\
+{"ip":"10.1.128.237", "local":"UNKNOWN", "type":"unknown", "asset_value" : "important"},\
+{"ip":"10.60.10.254", "local":"YES", "type":"printer", "asset_value" : "important"}]
+
+##### Threat Intel #####
+threat.intel.tracker.table=access_tracker
+threat.intel.tracker.cf=t
+threat.intel.simple.hbase.table=threatintel
+threat.intel.simple.hbase.cf=t
+
+##### Parallelism #####
+kafka.spout.parallelism=1
+enrichment.parallelism=1
+threat.intel.parallelism=1
+kafka.writer.parallelism=1
+
+##### Caches #####
+enrichment.cache.size=100000
+threat.intel.cache.size=100000
+
+##### Threads #####
+enrichment.threadpool.size=1
+enrichment.threadpool.type=FIXED

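The Threads section above is new with the unified topology: remote-unified.yaml (diffed below) wires enrichment.threadpool.size and enrichment.threadpool.type into metron.threadpool.size and metron.threadpool.type. Per the yaml comments, the size is either a number (5) or a multiple of cores (2C), and the type is FIXED or WORK_STEALING. A sketch of just those documented semantics, using a hypothetical factory rather than the unified bolt's actual code:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class ThreadpoolSketch {
      // "5" -> 5 threads; "2C" -> 2 x available cores.
      static int resolveSize(String size) {
        String s = size.trim().toUpperCase();
        if (s.endsWith("C")) {
          int perCore = Integer.parseInt(s.substring(0, s.length() - 1));
          return perCore * Runtime.getRuntime().availableProcessors();
        }
        return Integer.parseInt(s);
      }

      // enrichment.threadpool.type: FIXED or WORK_STEALING
      static ExecutorService create(String size, String type) {
        int n = resolveSize(size);
        return "WORK_STEALING".equalsIgnoreCase(type.trim())
            ? Executors.newWorkStealingPool(n)
            : Executors.newFixedThreadPool(n);
      }
    }
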
http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-platform/metron-enrichment/src/main/config/enrichment-unified.properties.j2
----------------------------------------------------------------------
diff --git a/metron-platform/metron-enrichment/src/main/config/enrichment-unified.properties.j2 b/metron-platform/metron-enrichment/src/main/config/enrichment-unified.properties.j2
new file mode 100644
index 0000000..8c28c49
--- /dev/null
+++ b/metron-platform/metron-enrichment/src/main/config/enrichment-unified.properties.j2
@@ -0,0 +1,60 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+##### Storm #####
+enrichment.workers={{enrichment_workers}}
+enrichment.acker.executors={{enrichment_acker_executors}}
+topology.worker.childopts={{enrichment_topology_worker_childopts}}
+topology.auto-credentials={{topology_auto_credentials}}
+topology.max.spout.pending={{enrichment_topology_max_spout_pending}}
+
+##### Kafka #####
+kafka.zk={{zookeeper_quorum}}
+kafka.broker={{kafka_brokers}}
+kafka.security.protocol={{kafka_security_protocol}}
+kafka.start={{enrichment_kafka_start}}
+enrichment.input.topic={{enrichment_input_topic}}
+enrichment.output.topic={{enrichment_output_topic}}
+enrichment.error.topic={{enrichment_error_topic}}
+threat.intel.error.topic={{threatintel_error_topic}}
+
+##### Enrichment #####
+hbase.provider.impl={{enrichment_hbase_provider_impl}}
+enrichment.simple.hbase.table={{enrichment_hbase_table}}
+enrichment.simple.hbase.cf={{enrichment_hbase_cf}}
+enrichment.host.known_hosts={{enrichment_host_known_hosts}}
+
+##### Threat Intel #####
+threat.intel.tracker.table={{threatintel_hbase_table}}
+threat.intel.tracker.cf={{threatintel_hbase_cf}}
+threat.intel.simple.hbase.table={{threatintel_hbase_table}}
+threat.intel.simple.hbase.cf={{threatintel_hbase_cf}}
+
+##### Parallelism #####
+kafka.spout.parallelism={{unified_kafka_spout_parallelism}}
+enrichment.parallelism={{unified_enrichment_parallelism}}
+threat.intel.parallelism={{unified_threat_intel_parallelism}}
+kafka.writer.parallelism={{unified_kafka_writer_parallelism}}
+
+##### Caches #####
+enrichment.cache.size={{unified_enrichment_cache_size}}
+threat.intel.cache.size={{unified_threat_intel_cache_size}}
+
+##### Threads #####
+enrichment.threadpool.size={{unified_enrichment_threadpool_size}}
+enrichment.threadpool.type={{unified_enrichment_threadpool_type}}

http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-platform/metron-enrichment/src/main/config/enrichment.properties
----------------------------------------------------------------------
diff --git a/metron-platform/metron-enrichment/src/main/config/enrichment.properties b/metron-platform/metron-enrichment/src/main/config/enrichment.properties
deleted file mode 100644
index 9592968..0000000
--- a/metron-platform/metron-enrichment/src/main/config/enrichment.properties
+++ /dev/null
@@ -1,64 +0,0 @@
-#  Licensed to the Apache Software Foundation (ASF) under one
-#  or more contributor license agreements.  See the NOTICE file
-#  distributed with this work for additional information
-#  regarding copyright ownership.  The ASF licenses this file
-#  to you under the Apache License, Version 2.0 (the
-#  "License"); you may not use this file except in compliance
-#  with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-
-##### Storm #####
-enrichment.workers=1
-enrichment.acker.executors=0
-topology.worker.childopts=
-topology.auto-credentials=
-topology.max.spout.pending=
-
-##### Kafka #####
-kafka.zk=node1:2181
-kafka.broker=node1:6667
-kafka.security.protocol=PLAINTEXT
-
-# One of EARLIEST, LATEST, UNCOMMITTED_EARLIEST, UNCOMMITTED_LATEST
-kafka.start=UNCOMMITTED_EARLIEST
-
-enrichment.input.topic=enrichments
-enrichment.output.topic=indexing
-enrichment.error.topic=indexing
-threat.intel.error.topic=indexing
-
-##### JoinBolt #####
-enrichment.join.cache.size=100000
-threat.intel.join.cache.size=100000
-
-##### Enrichment #####
-hbase.provider.impl=org.apache.metron.hbase.HTableProvider
-enrichment.simple.hbase.table=enrichment
-enrichment.simple.hbase.cf=t
-enrichment.host.known_hosts=[{"ip":"10.1.128.236", "local":"YES", "type":"webserver", "asset_value" : "important"},\
-{"ip":"10.1.128.237", "local":"UNKNOWN", "type":"unknown", "asset_value" : "important"},\
-{"ip":"10.60.10.254", "local":"YES", "type":"printer", "asset_value" : "important"}]
-
-
-##### Threat Intel #####
-threat.intel.tracker.table=access_tracker
-threat.intel.tracker.cf=t
-threat.intel.simple.hbase.table=threatintel
-threat.intel.simple.hbase.cf=t
-
-##### Parallelism #####
-kafka.spout.parallelism=1
-enrichment.split.parallelism=1
-enrichment.stellar.parallelism=1
-enrichment.join.parallelism=1
-threat.intel.split.parallelism=1
-threat.intel.stellar.parallelism=1
-threat.intel.join.parallelism=1
-kafka.writer.parallelism=1

http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-platform/metron-enrichment/src/main/config/enrichment.properties.j2
----------------------------------------------------------------------
diff --git a/metron-platform/metron-enrichment/src/main/config/enrichment.properties.j2 b/metron-platform/metron-enrichment/src/main/config/enrichment.properties.j2
deleted file mode 100755
index 133f9c5..0000000
--- a/metron-platform/metron-enrichment/src/main/config/enrichment.properties.j2
+++ /dev/null
@@ -1,63 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-##### Storm #####
-enrichment.workers={{enrichment_workers}}
-enrichment.acker.executors={{enrichment_acker_executors}}
-topology.worker.childopts={{enrichment_topology_worker_childopts}}
-topology.auto-credentials={{topology_auto_credentials}}
-topology.max.spout.pending={{enrichment_topology_max_spout_pending}}
-
-##### Kafka #####
-kafka.zk={{zookeeper_quorum}}
-kafka.broker={{kafka_brokers}}
-kafka.security.protocol={{kafka_security_protocol}}
-
-# One of EARLIEST, LATEST, UNCOMMITTED_EARLIEST, UNCOMMITTED_LATEST
-kafka.start={{enrichment_kafka_start}}
-
-enrichment.input.topic={{enrichment_input_topic}}
-enrichment.output.topic={{enrichment_output_topic}}
-enrichment.error.topic={{enrichment_error_topic}}
-threat.intel.error.topic={{threatintel_error_topic}}
-
-##### JoinBolt #####
-enrichment.join.cache.size={{enrichment_join_cache_size}}
-threat.intel.join.cache.size={{threatintel_join_cache_size}}
-
-##### Enrichment #####
-hbase.provider.impl={{enrichment_hbase_provider_impl}}
-enrichment.simple.hbase.table={{enrichment_hbase_table}}
-enrichment.simple.hbase.cf={{enrichment_hbase_cf}}
-enrichment.host.known_hosts={{enrichment_host_known_hosts}}
-
-##### Threat Intel #####
-threat.intel.tracker.table={{threatintel_hbase_table}}
-threat.intel.tracker.cf={{threatintel_hbase_cf}}
-threat.intel.simple.hbase.table={{threatintel_hbase_table}}
-threat.intel.simple.hbase.cf={{threatintel_hbase_cf}}
-
-##### Parallelism #####
-kafka.spout.parallelism={{enrichment_kafka_spout_parallelism}}
-enrichment.split.parallelism={{enrichment_split_parallelism}}
-enrichment.stellar.parallelism={{enrichment_stellar_parallelism}}
-enrichment.join.parallelism={{enrichment_join_parallelism}}
-threat.intel.split.parallelism={{threat_intel_split_parallelism}}
-threat.intel.stellar.parallelism={{threat_intel_stellar_parallelism}}
-threat.intel.join.parallelism={{threat_intel_join_parallelism}}
-kafka.writer.parallelism={{kafka_writer_parallelism}}

http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-platform/metron-enrichment/src/main/flux/enrichment/remote-splitjoin.yaml
----------------------------------------------------------------------
diff --git a/metron-platform/metron-enrichment/src/main/flux/enrichment/remote-splitjoin.yaml b/metron-platform/metron-enrichment/src/main/flux/enrichment/remote-splitjoin.yaml
new file mode 100644
index 0000000..fd7ceff
--- /dev/null
+++ b/metron-platform/metron-enrichment/src/main/flux/enrichment/remote-splitjoin.yaml
@@ -0,0 +1,590 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+name: "enrichment"
+config:
+    topology.workers: ${enrichment.workers}
+    topology.acker.executors: ${enrichment.acker.executors}
+    topology.worker.childopts: ${topology.worker.childopts}
+    topology.auto-credentials: ${topology.auto-credentials}
+    topology.max.spout.pending: ${topology.max.spout.pending}
+
+components:
+
+# Enrichment
+    -   id: "stellarEnrichmentAdapter"
+        className: "org.apache.metron.enrichment.adapters.stellar.StellarAdapter"
+        configMethods:
+            -   name: "ofType"
+                args:
+                    - "ENRICHMENT"
+
+    # Any kafka props for the producer go here.
+    -   id: "kafkaWriterProps"
+        className: "java.util.HashMap"
+        configMethods:
+          -   name: "put"
+              args:
+                  - "security.protocol"
+                  - "${kafka.security.protocol}"
+
+    -   id: "stellarEnrichment"
+        className: "org.apache.metron.enrichment.configuration.Enrichment"
+        constructorArgs:
+            -   "stellar"
+            -   ref: "stellarEnrichmentAdapter"
+
+    -   id: "geoEnrichmentAdapter"
+        className: "org.apache.metron.enrichment.adapters.geo.GeoAdapter"
+    -   id: "geoEnrichment"
+        className: "org.apache.metron.enrichment.configuration.Enrichment"
+        constructorArgs:
+            -   "geo"
+            -   ref: "geoEnrichmentAdapter"
+    -   id: "hostEnrichmentAdapter"
+        className: "org.apache.metron.enrichment.adapters.host.HostFromJSONListAdapter"
+        constructorArgs:
+            - '${enrichment.host.known_hosts}'
+    -   id: "hostEnrichment"
+        className: "org.apache.metron.enrichment.configuration.Enrichment"
+        constructorArgs:
+            -   "host"
+            -   ref: "hostEnrichmentAdapter"
+
+    -   id: "simpleHBaseEnrichmentConfig"
+        className: "org.apache.metron.enrichment.adapters.simplehbase.SimpleHBaseConfig"
+        configMethods:
+            -   name: "withProviderImpl"
+                args:
+                    - "${hbase.provider.impl}"
+            -   name: "withHBaseTable"
+                args:
+                    - "${enrichment.simple.hbase.table}"
+            -   name: "withHBaseCF"
+                args:
+                    - "${enrichment.simple.hbase.cf}"
+    -   id: "simpleHBaseEnrichmentAdapter"
+        className: "org.apache.metron.enrichment.adapters.simplehbase.SimpleHBaseAdapter"
+        configMethods:
+           -    name: "withConfig"
+                args:
+                    - ref: "simpleHBaseEnrichmentConfig"
+    -   id: "simpleHBaseEnrichment"
+        className: "org.apache.metron.enrichment.configuration.Enrichment"
+        constructorArgs:
+          -   "hbaseEnrichment"
+          -   ref: "simpleHBaseEnrichmentAdapter"
+    -   id: "enrichments"
+        className: "java.util.ArrayList"
+        configMethods:
+            -   name: "add"
+                args:
+                    - ref: "geoEnrichment"
+            -   name: "add"
+                args:
+                    - ref: "hostEnrichment"
+            -   name: "add"
+                args:
+                    - ref: "simpleHBaseEnrichment"
+            -   name: "add"
+                args:
+                    - ref: "stellarEnrichment"
+
+    #enrichment error
+    -   id: "enrichmentErrorKafkaWriter"
+        className: "org.apache.metron.writer.kafka.KafkaWriter"
+        configMethods:
+            -   name: "withTopic"
+                args:
+                    - "${enrichment.error.topic}"
+            -   name: "withZkQuorum"
+                args:
+                    - "${kafka.zk}"
+            -   name: "withProducerConfigs"
+                args:
+                    - ref: "kafkaWriterProps"
+
+# Threat Intel
+    -   id: "stellarThreatIntelAdapter"
+        className: "org.apache.metron.enrichment.adapters.stellar.StellarAdapter"
+        configMethods:
+            -   name: "ofType"
+                args:
+                    - "THREAT_INTEL"
+    -   id: "stellarThreatIntelEnrichment"
+        className: "org.apache.metron.enrichment.configuration.Enrichment"
+        constructorArgs:
+            -   "stellar"
+            -   ref: "stellarThreatIntelAdapter"
+    -   id: "simpleHBaseThreatIntelConfig"
+        className: "org.apache.metron.enrichment.adapters.threatintel.ThreatIntelConfig"
+        configMethods:
+            -   name: "withProviderImpl"
+                args:
+                    - "${hbase.provider.impl}"
+            -   name: "withTrackerHBaseTable"
+                args:
+                    - "${threat.intel.tracker.table}"
+            -   name: "withTrackerHBaseCF"
+                args:
+                    - "${threat.intel.tracker.cf}"
+            -   name: "withHBaseTable"
+                args:
+                    - "${threat.intel.simple.hbase.table}"
+            -   name: "withHBaseCF"
+                args:
+                    - "${threat.intel.simple.hbase.cf}"
+    -   id: "simpleHBaseThreatIntelAdapter"
+        className: "org.apache.metron.enrichment.adapters.threatintel.ThreatIntelAdapter"
+        configMethods:
+           -    name: "withConfig"
+                args:
+                    - ref: "simpleHBaseThreatIntelConfig"
+    -   id: "simpleHBaseThreatIntelEnrichment"
+        className: "org.apache.metron.enrichment.configuration.Enrichment"
+        constructorArgs:
+          -   "hbaseThreatIntel"
+          -   ref: "simpleHBaseThreatIntelAdapter"
+
+    -   id: "threatIntels"
+        className: "java.util.ArrayList"
+        configMethods:
+            -   name: "add"
+                args:
+                    - ref: "simpleHBaseThreatIntelEnrichment"
+            -   name: "add"
+                args:
+                    - ref: "stellarThreatIntelEnrichment"
+
+    #threatintel error
+    -   id: "threatIntelErrorKafkaWriter"
+        className: "org.apache.metron.writer.kafka.KafkaWriter"
+        configMethods:
+            -   name: "withTopic"
+                args:
+                    - "${threat.intel.error.topic}"
+            -   name: "withZkQuorum"
+                args:
+                    - "${kafka.zk}"
+            -   name: "withProducerConfigs"
+                args:
+                    - ref: "kafkaWriterProps"
+#indexing
+    -   id: "kafkaWriter"
+        className: "org.apache.metron.writer.kafka.KafkaWriter"
+        configMethods:
+            -   name: "withTopic"
+                args:
+                    - "${enrichment.output.topic}"
+            -   name: "withZkQuorum"
+                args:
+                    - "${kafka.zk}"
+            -   name: "withProducerConfigs"
+                args:
+                    - ref: "kafkaWriterProps"
+
+#kafka/zookeeper
+    # Any kafka props for the consumer go here.
+    -   id: "kafkaProps"
+        className: "java.util.HashMap"
+        configMethods:
+          -   name: "put"
+              args:
+                  - "value.deserializer"
+                  - "org.apache.kafka.common.serialization.ByteArrayDeserializer"
+          -   name: "put"
+              args:
+                  - "key.deserializer"
+                  - "org.apache.kafka.common.serialization.ByteArrayDeserializer"
+          -   name: "put"
+              args:
+                  - "group.id"
+                  - "enrichments"
+          -   name: "put"
+              args:
+                  - "security.protocol"
+                  - "${kafka.security.protocol}"
+
+
+    # The fields to pull out of the kafka messages
+    -   id: "fields"
+        className: "java.util.ArrayList"
+        configMethods:
+          -   name: "add"
+              args:
+                  - "value"
+
+    -   id: "kafkaConfig"
+        className: "org.apache.metron.storm.kafka.flux.SimpleStormKafkaBuilder"
+        constructorArgs:
+          - ref: "kafkaProps"
+          # topic name
+          - "${enrichment.input.topic}"
+          - "${kafka.zk}"
+          - ref: "fields"
+        configMethods:
+            -   name: "setFirstPollOffsetStrategy"
+                args:
+                    - "${kafka.start}"
+
+
+spouts:
+    -   id: "kafkaSpout"
+        className: "org.apache.metron.storm.kafka.flux.StormKafkaSpout"
+        constructorArgs:
+            - ref: "kafkaConfig"
+        parallelism: ${kafka.spout.parallelism}
+
+bolts:
+# Enrichment Bolts
+    -   id: "enrichmentSplitBolt"
+        className: "org.apache.metron.enrichment.bolt.EnrichmentSplitterBolt"
+        constructorArgs:
+            - "${kafka.zk}"
+        configMethods:
+            -   name: "withEnrichments"
+                args:
+                    - ref: "enrichments"
+        parallelism: ${enrichment.split.parallelism}
+
+    -   id: "geoEnrichmentBolt"
+        className: "org.apache.metron.enrichment.bolt.GenericEnrichmentBolt"
+        constructorArgs:
+            - "${kafka.zk}"
+        configMethods:
+            -   name: "withEnrichment"
+                args:
+                    - ref: "geoEnrichment"
+            -   name: "withMaxCacheSize"
+                args: [10000]
+            -   name: "withMaxTimeRetain"
+                args: [10]
+
+    -   id: "stellarEnrichmentBolt"
+        className: "org.apache.metron.enrichment.bolt.GenericEnrichmentBolt"
+        constructorArgs:
+            - "${kafka.zk}"
+        configMethods:
+            -   name: "withEnrichment"
+                args:
+                    - ref: "stellarEnrichment"
+            -   name: "withMaxCacheSize"
+                args: [10000]
+            -   name: "withMaxTimeRetain"
+                args: [10]
+        parallelism: ${enrichment.stellar.parallelism}
+
+    -   id: "hostEnrichmentBolt"
+        className: "org.apache.metron.enrichment.bolt.GenericEnrichmentBolt"
+        constructorArgs:
+            - "${kafka.zk}"
+        configMethods:
+            -   name: "withEnrichment"
+                args:
+                    - ref: "hostEnrichment"
+            -   name: "withMaxCacheSize"
+                args: [10000]
+            -   name: "withMaxTimeRetain"
+                args: [10]
+
+    -   id: "simpleHBaseEnrichmentBolt"
+        className: "org.apache.metron.enrichment.bolt.GenericEnrichmentBolt"
+        constructorArgs:
+            - "${kafka.zk}"
+        configMethods:
+            -   name: "withEnrichment"
+                args:
+                    - ref: "simpleHBaseEnrichment"
+            -   name: "withMaxCacheSize"
+                args: [10000]
+            -   name: "withMaxTimeRetain"
+                args: [10]
+
+    -   id: "enrichmentJoinBolt"
+        className: "org.apache.metron.enrichment.bolt.EnrichmentJoinBolt"
+        constructorArgs:
+            - "${kafka.zk}"
+        configMethods:
+            -   name: "withMaxCacheSize"
+                args: [${enrichment.join.cache.size}]
+            -   name: "withMaxTimeRetain"
+                args: [10]
+        parallelism: ${enrichment.join.parallelism}
+
+    -   id: "enrichmentErrorOutputBolt"
+        className: "org.apache.metron.writer.bolt.BulkMessageWriterBolt"
+        constructorArgs:
+            - "${kafka.zk}"
+        configMethods:
+            -   name: "withMessageWriter"
+                args:
+                    - ref: "enrichmentErrorKafkaWriter"
+
+
+# Threat Intel Bolts
+    -   id: "threatIntelSplitBolt"
+        className: "org.apache.metron.enrichment.bolt.ThreatIntelSplitterBolt"
+        constructorArgs:
+            - "${kafka.zk}"
+        configMethods:
+            -   name: "withEnrichments"
+                args:
+                    - ref: "threatIntels"
+            -   name: "withMessageFieldName"
+                args: ["message"]
+        parallelism: ${threat.intel.split.parallelism}
+
+    -   id: "simpleHBaseThreatIntelBolt"
+        className: "org.apache.metron.enrichment.bolt.GenericEnrichmentBolt"
+        constructorArgs:
+            - "${kafka.zk}"
+        configMethods:
+            -   name: "withEnrichment"
+                args:
+                    - ref: "simpleHBaseThreatIntelEnrichment"
+            -   name: "withMaxCacheSize"
+                args: [10000]
+            -   name: "withMaxTimeRetain"
+                args: [10]
+    -   id: "stellarThreatIntelBolt"
+        className: "org.apache.metron.enrichment.bolt.GenericEnrichmentBolt"
+        constructorArgs:
+            - "${kafka.zk}"
+        configMethods:
+            -   name: "withEnrichment"
+                args:
+                    - ref: "stellarThreatIntelEnrichment"
+            -   name: "withMaxCacheSize"
+                args: [10000]
+            -   name: "withMaxTimeRetain"
+                args: [10]
+        parallelism: ${threat.intel.stellar.parallelism}
+
+    -   id: "threatIntelJoinBolt"
+        className: "org.apache.metron.enrichment.bolt.ThreatIntelJoinBolt"
+        constructorArgs:
+            - "${kafka.zk}"
+        configMethods:
+            -   name: "withMaxCacheSize"
+                args: [${threat.intel.join.cache.size}]
+            -   name: "withMaxTimeRetain"
+                args: [10]
+        parallelism: ${threat.intel.join.parallelism}
+
+    -   id: "threatIntelErrorOutputBolt"
+        className: "org.apache.metron.writer.bolt.BulkMessageWriterBolt"
+        constructorArgs:
+            - "${kafka.zk}"
+        configMethods:
+            -   name: "withMessageWriter"
+                args:
+                    - ref: "threatIntelErrorKafkaWriter"
+
+# Indexing Bolts
+    -   id: "outputBolt"
+        className: "org.apache.metron.writer.bolt.BulkMessageWriterBolt"
+        constructorArgs:
+            - "${kafka.zk}"
+        configMethods:
+            -   name: "withMessageWriter"
+                args:
+                    - ref: "kafkaWriter"
+        parallelism: ${kafka.writer.parallelism}
+
+
+streams:
+#parser
+    -   name: "spout -> enrichmentSplit"
+        from: "kafkaSpout"
+        to: "enrichmentSplitBolt"
+        grouping:
+            type: LOCAL_OR_SHUFFLE
+
+#enrichment
+    -   name: "enrichmentSplit -> host"
+        from: "enrichmentSplitBolt"
+        to: "hostEnrichmentBolt"
+        grouping:
+            streamId: "host"
+            type: FIELDS
+            args: ["message"]
+
+    -   name: "enrichmentSplit -> geo"
+        from: "enrichmentSplitBolt"
+        to: "geoEnrichmentBolt"
+        grouping:
+            streamId: "geo"
+            type: FIELDS
+            args: ["message"]
+
+    -   name: "enrichmentSplit -> stellar"
+        from: "enrichmentSplitBolt"
+        to: "stellarEnrichmentBolt"
+        grouping:
+            streamId: "stellar"
+            type: FIELDS
+            args: ["message"]
+
+
+    -   name: "enrichmentSplit -> simpleHBaseEnrichmentBolt"
+        from: "enrichmentSplitBolt"
+        to: "simpleHBaseEnrichmentBolt"
+        grouping:
+            streamId: "hbaseEnrichment"
+            type: FIELDS
+            args: ["message"]
+
+    -   name: "splitter -> join"
+        from: "enrichmentSplitBolt"
+        to: "enrichmentJoinBolt"
+        grouping:
+            streamId: "message"
+            type: FIELDS
+            args: ["key"]
+
+    -   name: "geo -> join"
+        from: "geoEnrichmentBolt"
+        to: "enrichmentJoinBolt"
+        grouping:
+            streamId: "geo"
+            type: FIELDS
+            args: ["key"]
+
+    -   name: "stellar -> join"
+        from: "stellarEnrichmentBolt"
+        to: "enrichmentJoinBolt"
+        grouping:
+            streamId: "stellar"
+            type: FIELDS
+            args: ["key"]
+
+    -   name: "simpleHBaseEnrichmentBolt -> join"
+        from: "simpleHBaseEnrichmentBolt"
+        to: "enrichmentJoinBolt"
+        grouping:
+            streamId: "hbaseEnrichment"
+            type: FIELDS
+            args: ["key"]
+
+    -   name: "host -> join"
+        from: "hostEnrichmentBolt"
+        to: "enrichmentJoinBolt"
+        grouping:
+            streamId: "host"
+            type: FIELDS
+            args: ["key"]
+
+    # Error output
+    -   name: "geoEnrichmentBolt -> enrichmentErrorOutputBolt"
+        from: "geoEnrichmentBolt"
+        to: "enrichmentErrorOutputBolt"
+        grouping:
+            streamId: "error"
+            type: LOCAL_OR_SHUFFLE
+
+    -   name: "stellarEnrichmentBolt -> enrichmentErrorOutputBolt"
+        from: "stellarEnrichmentBolt"
+        to: "enrichmentErrorOutputBolt"
+        grouping:
+            streamId: "error"
+            type: LOCAL_OR_SHUFFLE
+
+    -   name: "hostEnrichmentBolt -> enrichmentErrorOutputBolt"
+        from: "hostEnrichmentBolt"
+        to: "enrichmentErrorOutputBolt"
+        grouping:
+            streamId: "error"
+            type: LOCAL_OR_SHUFFLE
+
+    -   name: "simpleHBaseEnrichmentBolt -> enrichmentErrorOutputBolt"
+        from: "simpleHBaseEnrichmentBolt"
+        to: "enrichmentErrorOutputBolt"
+        grouping:
+            streamId: "error"
+            type: LOCAL_OR_SHUFFLE
+
+#threat intel
+    -   name: "enrichmentJoin -> threatSplit"
+        from: "enrichmentJoinBolt"
+        to: "threatIntelSplitBolt"
+        grouping:
+            streamId: "message"
+            type: FIELDS
+            args: ["key"]
+
+    -   name: "threatSplit -> simpleHBaseThreatIntel"
+        from: "threatIntelSplitBolt"
+        to: "simpleHBaseThreatIntelBolt"
+        grouping:
+            streamId: "hbaseThreatIntel"
+            type: FIELDS
+            args: ["message"]
+
+    -   name: "threatSplit -> stellarThreatIntel"
+        from: "threatIntelSplitBolt"
+        to: "stellarThreatIntelBolt"
+        grouping:
+            streamId: "stellar"
+            type: FIELDS
+            args: ["message"]
+
+
+    -   name: "simpleHBaseThreatIntel -> join"
+        from: "simpleHBaseThreatIntelBolt"
+        to: "threatIntelJoinBolt"
+        grouping:
+            streamId: "hbaseThreatIntel"
+            type: FIELDS
+            args: ["key"]
+
+    -   name: "stellarThreatIntel -> join"
+        from: "stellarThreatIntelBolt"
+        to: "threatIntelJoinBolt"
+        grouping:
+            streamId: "stellar"
+            type: FIELDS
+            args: ["key"]
+
+    -   name: "threatIntelSplit -> threatIntelJoin"
+        from: "threatIntelSplitBolt"
+        to: "threatIntelJoinBolt"
+        grouping:
+            streamId: "message"
+            type: FIELDS
+            args: ["key"]
+#output
+    -   name: "threatIntelJoin -> output"
+        from: "threatIntelJoinBolt"
+        to: "outputBolt"
+        grouping:
+            streamId: "message"
+            type: LOCAL_OR_SHUFFLE
+
+    # Error output
+    -   name: "simpleHBaseThreatIntelBolt -> threatIntelErrorOutputBolt"
+        from: "simpleHBaseThreatIntelBolt"
+        to: "threatIntelErrorOutputBolt"
+        grouping:
+            streamId: "error"
+            type: LOCAL_OR_SHUFFLE
+
+    -   name: "stellarThreatIntelBolt -> threatIntelErrorOutputBolt"
+        from: "stellarThreatIntelBolt"
+        to: "threatIntelErrorOutputBolt"
+        grouping:
+            streamId: "error"
+            type: LOCAL_OR_SHUFFLE
+
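
Note the grouping pattern in the streams above: the fan-out streams from the splitters group on "message", while every stream into a join bolt groups on "key", so all partial enrichment results for one message land on the same join task and can be merged in memory. A hypothetical illustration of that routing, mirroring (not reproducing) the hash-mod placement a FIELDS grouping implies:

    public class FieldsGroupingSketch {
      // Tuples with the same grouping-field value always map to the same task.
      static int taskFor(String key, int numJoinTasks) {
        return Math.floorMod(key.hashCode(), numJoinTasks);
      }

      public static void main(String[] args) {
        // The geo partial result and the original message for key "msg-42"
        // pick the same EnrichmentJoinBolt task.
        System.out.println(taskFor("msg-42", 4));
        System.out.println(taskFor("msg-42", 4));
      }
    }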

http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-platform/metron-enrichment/src/main/flux/enrichment/remote-unified.yaml
----------------------------------------------------------------------
diff --git a/metron-platform/metron-enrichment/src/main/flux/enrichment/remote-unified.yaml b/metron-platform/metron-enrichment/src/main/flux/enrichment/remote-unified.yaml
index ddc5ffc..d7107d9 100644
--- a/metron-platform/metron-enrichment/src/main/flux/enrichment/remote-unified.yaml
+++ b/metron-platform/metron-enrichment/src/main/flux/enrichment/remote-unified.yaml
@@ -27,13 +27,12 @@ config:
     topology.worker.childopts: ${topology.worker.childopts}
     topology.auto-credentials: ${topology.auto-credentials}
     topology.max.spout.pending: ${topology.max.spout.pending}
-    # Change this if you want to adjust the threadpool size
-    metron.threadpool.size: "2C" # Either a number (e.g. 5) or multiple of cores (e.g. 5C = 5 times the number of cores)
-    # Change this if you want to adjust the threadpool type
-    metron.threadpool.type: "FIXED" # FIXED or WORK_STEALING
+    metron.threadpool.size: ${enrichment.threadpool.size} # Either a number (e.g. 5) or multiple of cores (e.g. 5C = 5 times the number of cores)
+    metron.threadpool.type: ${enrichment.threadpool.type} # FIXED or WORK_STEALING
+
 components:
 
-# Enrichment
+    # enrichment
     -   id: "stellarEnrichmentAdapter"
         className: "org.apache.metron.enrichment.adapters.stellar.StellarAdapter"
         configMethods:
@@ -41,7 +40,7 @@ components:
                 args:
                     - "ENRICHMENT"
 
-    # Any kafka props for the producer go here.
+    # any kafka props for the producer go here.
     -   id: "kafkaWriterProps"
         className: "java.util.HashMap"
         configMethods:
@@ -58,15 +57,18 @@ components:
 
     -   id: "geoEnrichmentAdapter"
         className: "org.apache.metron.enrichment.adapters.geo.GeoAdapter"
+
     -   id: "geoEnrichment"
         className: "org.apache.metron.enrichment.configuration.Enrichment"
         constructorArgs:
             -   "geo"
             -   ref: "geoEnrichmentAdapter"
+
     -   id: "hostEnrichmentAdapter"
         className: "org.apache.metron.enrichment.adapters.host.HostFromJSONListAdapter"
         constructorArgs:
             - '${enrichment.host.known_hosts}'
+
     -   id: "hostEnrichment"
         className: "org.apache.metron.enrichment.configuration.Enrichment"
         constructorArgs:
@@ -85,17 +87,20 @@ components:
             -   name: "withHBaseCF"
                 args:
                     - "${enrichment.simple.hbase.cf}"
+
     -   id: "simpleHBaseEnrichmentAdapter"
         className: "org.apache.metron.enrichment.adapters.simplehbase.SimpleHBaseAdapter"
         configMethods:
            -    name: "withConfig"
                 args:
                     - ref: "simpleHBaseEnrichmentConfig"
+
     -   id: "simpleHBaseEnrichment"
         className: "org.apache.metron.enrichment.configuration.Enrichment"
         constructorArgs:
           -   "hbaseEnrichment"
           -   ref: "simpleHBaseEnrichmentAdapter"
+
     -   id: "enrichments"
         className: "java.util.ArrayList"
         configMethods:
@@ -112,7 +117,7 @@ components:
                 args:
                     - ref: "stellarEnrichment"
 
-    #enrichment error
+    # enrichment error
     -   id: "enrichmentErrorKafkaWriter"
         className: "org.apache.metron.writer.kafka.KafkaWriter"
         configMethods:
@@ -123,21 +128,23 @@ components:
                 args:
                     - "${kafka.zk}"
             -   name: "withProducerConfigs"
-                args: 
+                args:
                     - ref: "kafkaWriterProps"
 
-# Threat Intel
+    # threat intel
     -   id: "stellarThreatIntelAdapter"
         className: "org.apache.metron.enrichment.adapters.stellar.StellarAdapter"
         configMethods:
             -   name: "ofType"
                 args:
                     - "THREAT_INTEL"
+
     -   id: "stellarThreatIntelEnrichment"
         className: "org.apache.metron.enrichment.configuration.Enrichment"
         constructorArgs:
             -   "stellar"
             -   ref: "stellarThreatIntelAdapter"
+
     -   id: "simpleHBaseThreatIntelConfig"
         className: "org.apache.metron.enrichment.adapters.threatintel.ThreatIntelConfig"
         configMethods:
@@ -156,12 +163,14 @@ components:
             -   name: "withHBaseCF"
                 args:
                     - "${threat.intel.simple.hbase.cf}"
+
     -   id: "simpleHBaseThreatIntelAdapter"
         className: "org.apache.metron.enrichment.adapters.threatintel.ThreatIntelAdapter"
         configMethods:
            -    name: "withConfig"
                 args:
                     - ref: "simpleHBaseThreatIntelConfig"
+
     -   id: "simpleHBaseThreatIntelEnrichment"
         className: "org.apache.metron.enrichment.configuration.Enrichment"
         constructorArgs:
@@ -178,7 +187,7 @@ components:
                 args:
                     - ref: "stellarThreatIntelEnrichment"
 
-    #threatintel error
+    # threatintel error
     -   id: "threatIntelErrorKafkaWriter"
         className: "org.apache.metron.writer.kafka.KafkaWriter"
         configMethods:
@@ -189,9 +198,9 @@ components:
                 args:
                     - "${kafka.zk}"
             -   name: "withProducerConfigs"
-                args: 
+                args:
                     - ref: "kafkaWriterProps"
-#indexing
+    # indexing
     -   id: "kafkaWriter"
         className: "org.apache.metron.writer.kafka.KafkaWriter"
         configMethods:
@@ -202,11 +211,11 @@ components:
                 args:
                     - "${kafka.zk}"
             -   name: "withProducerConfigs"
-                args: 
+                args:
                     - ref: "kafkaWriterProps"
 
-#kafka/zookeeper
-    # Any kafka props for the consumer go here.
+    # kafka/zookeeper
+    # any kafka props for the consumer go here.
     -   id: "kafkaProps"
         className: "java.util.HashMap"
         configMethods:
@@ -228,7 +237,7 @@ components:
                   - "${kafka.security.protocol}"
 
 
-  # The fields to pull out of the kafka messages
+    # the fields to pull out of the kafka messages
     -   id: "fields"
         className: "java.util.ArrayList"
         configMethods:
@@ -251,6 +260,7 @@ components:
 
 
 spouts:
+
     -   id: "kafkaSpout"
         className: "org.apache.metron.storm.kafka.flux.StormKafkaSpout"
         constructorArgs:
@@ -258,7 +268,8 @@ spouts:
         parallelism: ${kafka.spout.parallelism}
 
 bolts:
-# Enrichment Bolts
+
+    # enrichment bolt
     -   id: "enrichmentBolt"
         className: "org.apache.metron.enrichment.bolt.UnifiedEnrichmentBolt"
         constructorArgs:
@@ -268,7 +279,7 @@ bolts:
                 args:
                     - ref: "enrichments"
             -   name: "withMaxCacheSize"
-                args: [${enrichment.join.cache.size}]
+                args: [${enrichment.cache.size}]
             -   name: "withMaxTimeRetain"
                 args: [10]
             -   name: "withCaptureCacheStats"
@@ -278,7 +289,7 @@ bolts:
                     - "ENRICHMENT"
             -   name: "withMessageGetter"
                 args: ["JSON_FROM_POSITION"]
-        parallelism: ${enrichment.join.parallelism}
+        parallelism: ${enrichment.parallelism}
 
     -   id: "enrichmentErrorOutputBolt"
         className: "org.apache.metron.writer.bolt.BulkMessageWriterBolt"
@@ -290,7 +301,7 @@ bolts:
                     - ref: "enrichmentErrorKafkaWriter"
 
 
-# Threat Intel Bolts
+    # threat intel bolts
     -   id: "threatIntelBolt"
         className: "org.apache.metron.enrichment.bolt.UnifiedEnrichmentBolt"
         constructorArgs:
@@ -300,7 +311,7 @@ bolts:
                 args:
                     - ref: "threatIntels"
             -   name: "withMaxCacheSize"
-                args: [${enrichment.join.cache.size}]
+                args: [${threat.intel.cache.size}]
             -   name: "withMaxTimeRetain"
                 args: [10]
             -   name: "withCaptureCacheStats"
@@ -312,7 +323,7 @@ bolts:
                 args: ["message"]
             -   name: "withMessageGetter"
                 args: ["JSON_FROM_FIELD_BY_REFERENCE"]
-        parallelism: ${threat.intel.join.parallelism}
+        parallelism: ${threat.intel.parallelism}
 
     -   id: "threatIntelErrorOutputBolt"
         className: "org.apache.metron.writer.bolt.BulkMessageWriterBolt"
@@ -323,7 +334,7 @@ bolts:
                 args:
                     - ref: "threatIntelErrorKafkaWriter"
 
-# Indexing Bolts
+    # output bolt
     -   id: "outputBolt"
         className: "org.apache.metron.writer.bolt.BulkMessageWriterBolt"
         constructorArgs:
@@ -334,16 +345,16 @@ bolts:
                     - ref: "kafkaWriter"
         parallelism: ${kafka.writer.parallelism}
 
-
 streams:
-#parser
+
+    # parser
     -   name: "spout -> enrichmentBolt"
         from: "kafkaSpout"
         to: "enrichmentBolt"
         grouping:
             type: LOCAL_OR_SHUFFLE
 
-    # Error output
+    # error output
     -   name: "enrichmentBolt -> enrichmentErrorOutputBolt"
         from: "enrichmentBolt"
         to: "enrichmentErrorOutputBolt"
@@ -351,7 +362,7 @@ streams:
             streamId: "error"
             type: LOCAL_OR_SHUFFLE
 
-#threat intel
+    # threat intel
     -   name: "enrichmentBolt -> threatIntelBolt"
         from: "enrichmentBolt"
         to: "threatIntelBolt"
@@ -359,7 +370,7 @@ streams:
             streamId: "message"
             type: LOCAL_OR_SHUFFLE
 
-#output
+    # output
     -   name: "threatIntelBolt -> output"
         from: "threatIntelBolt"
         to: "outputBolt"
@@ -367,12 +378,10 @@ streams:
             streamId: "message"
             type: LOCAL_OR_SHUFFLE
 
-    # Error output
+    # error output
     -   name: "threatIntelBolt -> threatIntelErrorOutputBolt"
         from: "threatIntelBolt"
         to: "threatIntelErrorOutputBolt"
         grouping:
             streamId: "error"
             type: LOCAL_OR_SHUFFLE
-
-

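Besides the comment and whitespace cleanup, this diff repoints the unified bolts from the split/join knobs to their own: enrichment.parallelism, threat.intel.parallelism, and the two new cache sizes. The cache sizes feed withMaxCacheSize, while withMaxTimeRetain stays fixed at 10. A sketch of the equivalent cache configuration, assuming the cache is Caffeine-backed and that the retain unit is minutes (the flux file does not state either); illustrative only:

    import com.github.benmanes.caffeine.cache.Cache;
    import com.github.benmanes.caffeine.cache.Caffeine;
    import java.util.concurrent.TimeUnit;

    public class EnrichmentCacheSketch {
      static Cache<String, Object> build(long maxSize, long retainMinutes) {
        return Caffeine.newBuilder()
            .maximumSize(maxSize)                              // enrichment.cache.size
            .expireAfterWrite(retainMinutes, TimeUnit.MINUTES) // withMaxTimeRetain: [10]
            .recordStats()                                     // withCaptureCacheStats
            .build();
      }
    }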

[19/50] [abbrv] metron git commit: METRON-1462: Separate ES and Kibana from Metron Mpack (mmiklavc via mmiklavc) closes apache/metron#943

Posted by rm...@apache.org.
http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/dashboard/dashboard-bulkload.json
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/dashboard/dashboard-bulkload.json b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/dashboard/dashboard-bulkload.json
new file mode 100644
index 0000000..037f1c6
--- /dev/null
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/dashboard/dashboard-bulkload.json
@@ -0,0 +1,88 @@
+{ "create" : { "_id": "all-metron-index", "_type": "index-pattern" } }
+{"title":"*_index_*","timeFieldName":"timestamp","notExpandable":true,"fields":"[{\"name\":\"AA\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"RA\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"RD\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"TC\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"TTLs\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"Z\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"_id\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\"
 :true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_index\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"_score\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":false,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_source\",\"type\":\"_source\",\"count\":0,\"scripted\":false,\"searchable\":false,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"actions\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:geoadapter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:geoadapter:end:ts\",\"type\":\"date\",
 \"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:hostfromjsonlistadapter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:hostfromjsonlistadapter:end:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:threatinteladapter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:threatinteladapter:end:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"addl\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"analyzer\",\"type\":\"string\",\"count\":0,\"scripted\":false,
 \"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"analyzers\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"answers\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"app\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"arg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"assigned_ip\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"auth_attempts\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"auth_success\",\"type\":\"boolean\",\"count\":0,\"scri
 pted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"basic_constraints:ca\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"basic_constraints:path_len\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"bro_timestamp\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"bro_timestamp.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"capture_password\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:curve\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFro
 mDocValues\":true},{\"name\":\"certificate:exponent\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:issuer\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:key_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:key_length\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:key_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:not_valid_after\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:not_valid_be
 fore\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:serial\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:sig_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:subject\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:version\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"cipher\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"cipher_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatab
 le\":true,\"readFromDocValues\":true},{\"name\":\"client\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"command\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"compression_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"conn_state\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"conn_uids\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"connect_info\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"curve\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable
 \":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"cwd\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"data_channel:orig_h\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"data_channel:passive\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"data_channel:resp_h\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"data_channel:resp_p\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"date\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"depth\",\"type\":\"number\",\"cou
 nt\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dgmlen\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dhcp_host_name\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dip\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"direction\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dp\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dropped\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dst\",\"type\":\"ip\",\"count\"
 :0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"duration\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"end-reason\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"end_reason\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"end_reason.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"end_time\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichmentjoinbolt:joiner:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\
 ":\"enrichments:geo:ip_dst_addr:city\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:country\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:dmaCode\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:latitude\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:locID\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:location_point\",\"type\":\"geo_point\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\
 "readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:longitude\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:postalCode\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:city\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:country\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:dmaCode\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:latitude\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchabl
 e\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:locID\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:location_point\",\"type\":\"geo_point\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:longitude\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:postalCode\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichmentsplitterbolt:splitter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichmentsplitterbolt:splitter:end:ts\",\"type\":\"dat
 e\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"error_fields\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"error_hash\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"error_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"established\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ethdst\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ethlen\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ethsrc\"
 ,\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"exception\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"failed_sensor_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"failure_reason\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"file_desc\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"file_mime_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"file_size\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocV
 alues\":true},{\"name\":\"filename\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"first_received\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"from\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"fuid\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"fuids\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"guid\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"guid.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"re
 adFromDocValues\":true},{\"name\":\"helo\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"history\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"host\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"host_key\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"host_key_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"host_p\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"hostname\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true
 ,\"readFromDocValues\":true},{\"name\":\"id\",\"type\":\"conflict\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false,\"conflictDescriptions\":{\"integer\":[\"snort_index_2017.11.06.19\",\"snort_index_2017.11.06.20\",\"snort_index_2017.11.06.21\",\"snort_index_2017.11.06.22\",\"snort_index_2017.11.06.23\",\"snort_index_2017.11.07.00\",\"snort_index_2017.11.07.01\"],\"keyword\":[\"bro_index_2017.11.02.23\",\"bro_index_2017.11.03.00\",\"bro_index_2017.11.03.01\",\"bro_index_2017.11.03.02\",\"bro_index_2017.11.03.03\",\"bro_index_2017.11.03.04\",\"bro_index_2017.11.03.13\",\"bro_index_2017.11.06.19\",\"bro_index_2017.11.06.20\",\"bro_index_2017.11.06.22\",\"bro_index_2017.11.06.23\",\"bro_index_2017.11.07.00\",\"bro_index_2017.11.07.01\"]}},{\"name\":\"iflags\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"in_reply_to\",\"type\":\"string\",\"count\":
 0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ip_dst_addr\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ip_dst_port\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ip_src_addr\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ip_src_port\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"iplen\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"is_alert\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"is_orig\",\"type\":\"boolean
 \",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"is_webmail\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"isn\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"issuer_subject\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"kex_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"last_alert\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"last_reply\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"lease_ti
 me\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"local_orig\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"local_resp\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"mac\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"mac_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"mailfrom\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"md5\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"
 message\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"method\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"mime_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"missed_bytes\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"missing_bytes\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"msg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"msg_id\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true}
 ,{\"name\":\"n\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"name\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"next_protocol\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"note\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"notice\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"oct\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"orig_bytes\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\
 "name\":\"orig_fuids\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"orig_fuids.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"orig_ip_bytes\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"orig_mime_types\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"orig_mime_types.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"orig_pkts\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"original_string\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchab
 le\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"original_string.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"overflow_bytes\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"p\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"parent_fuid\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"passive\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"password\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"path\",\"type\":\"string\",\"count\":0,\"s
 cripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"peer\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"peer_descr\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"pkt\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"port_num\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"proto\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"protocol\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"protocol.keyword\",\"type\":\"string\",\"cou
 nt\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"qclass\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"qclass_name\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"qtype\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"qtype_name\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"query\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"raw_message\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"raw_message_bytes\",\"typ
 e\":\"unknown\",\"count\":0,\"scripted\":false,\"searchable\":false,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"rcode\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"rcode_name\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"rcptto\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"referrer\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"rejected\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"remote_ip\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"reply
 _code\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"reply_msg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"reply_to\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"request_body_len\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"resp_bytes\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"resp_fuids\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"resp_fuids.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"read
 FromDocValues\":true},{\"name\":\"resp_ip_bytes\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"resp_mime_types\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"resp_mime_types.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"resp_pkts\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"response_body_len\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"result\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"resumed\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,
 \"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"riflags\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"risn\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"roct\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"rpkt\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"rtag\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"rtt\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ruflags\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchabl
 e\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"san:dns\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"san:email\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"san:ip\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"san:uri\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"second_received\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"seen_bytes\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"sensor:type\",\"type\":\"string\",\"count\":0,\"scripte
 d\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"sensor:type.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"serial\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"server\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"server_name\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"service\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"sha1\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"sha256\",\"type\":\"string\",\"cou
 nt\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"sig_generator\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"sig_id\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"sig_rev\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"sip\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"software_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"source\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"source:type\",\"type\":\
 "string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"source:type.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"sp\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"src\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"src_peer\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"stack\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"start_time\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"status_code\",\"type
 \":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"status_msg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"sub\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"subject\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"suppress_for\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"tag\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"tcpack\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"tcpflags\",
 \"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"tcpseq\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"tcpwindow\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"threat:triage:level\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"threat:triage:rules:0:score\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"threat:triage:score\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"threatinteljoinbolt:joiner:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,
 \"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"threatintelsplitterbolt:splitter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"threatintelsplitterbolt:splitter:end:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"timedout\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"timestamp\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"tls\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"to\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"tos\",\"type\":\"num
 ber\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"total_bytes\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"trans_depth\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"trans_id\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ttl\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"tunnel_parents\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"uflags\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"uid\",\"t
 ype\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"unparsed_version\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"uri\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"user\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"user_agent\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"username\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"version\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"v
 ersion:addl\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"version:major\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"version:minor\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"version:minor2\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"version:minor3\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"x_originating_ip\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true}]"}
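
In the index-pattern document above, the "fields" property is itself a JSON-encoded string (an array of field descriptors with name, type, searchable, aggregatable, and readFromDocValues), so consumers must decode it a second time after parsing the document. Fields typed "conflict", such as "id", are mapped differently across indices (integer in the snort indices, keyword in the bro indices per the conflictDescriptions) and cannot be aggregated reliably. A minimal Python sketch, using a trimmed stand-in for the full document so it stays self-contained:

    import json

    # Trimmed stand-in for the index-pattern document above; the real
    # "fields" value is the long JSON-encoded array shown in the diff.
    doc = {"fields": json.dumps([
        {"name": "timestamp", "type": "date"},
        {"name": "id", "type": "conflict",
         "conflictDescriptions": {"integer": ["snort_index_2017.11.06.19"],
                                  "keyword": ["bro_index_2017.11.02.23"]}},
    ])}

    # "fields" is JSON serialized inside JSON, so decode it again before use.
    for field in json.loads(doc["fields"]):
        if field["type"] == "conflict":
            print(field["name"], "->", sorted(field["conflictDescriptions"]))
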
+{ "create" : { "_id": "AV-Sj0e2hKs1cXXnFMqF", "_type": "visualization" } }
+{"title":"Welcome to Apache Metron","visState":"{\"title\":\"Welcome to Apache Metron\",\"type\":\"markdown\",\"params\":{\"type\":\"markdown\",\"markdown\":\"This dashboard enables the validation of Apache Metron and the end-to-end functioning of its default sensor suite.  The default sensor suite includes [\\n                            Snort](https://www.snort.org/), [\\n                            Bro](https://www.bro.org/), and [\\n                            YAF](https://tools.netsa.cert.org/yaf/).  One of Apache Metron's primary goals is to simplify the on-boarding of additional sources of telemetry.  In a production deployment these default sensors should be replaced with ones applicable to the target environment.\\n\\nApache Metron enables disparate sources of telemetry to all be viewed under a 'single pane of glass.'  Telemetry from each of the default sensors can be searched, aggregated, summarized, and viewed within this dashboard. This dashboard should be used as a spri
 ngboard upon which to create your own customized dashboards.\\n\\nThe panels below highlight the volume and variety of events that are currently being consumed by Apache Metron.\"},\"aggs\":[],\"listeners\":{}}","uiStateJSON":"{}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"query\":{\"match_all\":{}},\"filter\":[]}"}}
+{ "index" : { "_id": "5.6.2", "_type": "config" } }
+{"defaultIndex":"AV-S2e81hKs1cXXnFMqN"}
+{ "create" : { "_id": "AV-dVurck7f2nZ-iH3Ka", "_type": "visualization" } }
+{"title":"Event Count By Type","visState":"{\"title\":\"Event Count By Type\",\"type\":\"histogram\",\"params\":{\"grid\":{\"categoryLines\":false,\"style\":{\"color\":\"#eee\"}},\"categoryAxes\":[{\"id\":\"CategoryAxis-1\",\"type\":\"category\",\"position\":\"bottom\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\"},\"labels\":{\"show\":true,\"truncate\":100},\"title\":{\"text\":\"\"}}],\"valueAxes\":[{\"id\":\"ValueAxis-1\",\"name\":\"LeftAxis-1\",\"type\":\"value\",\"position\":\"left\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\",\"mode\":\"normal\",\"defaultYExtents\":false},\"labels\":{\"show\":true,\"rotate\":0,\"filter\":false,\"truncate\":100},\"title\":{\"text\":\"Count\"}}],\"seriesParams\":[{\"show\":\"true\",\"type\":\"histogram\",\"mode\":\"stacked\",\"data\":{\"label\":\"Count\",\"id\":\"1\"},\"valueAxis\":\"ValueAxis-1\",\"drawLinesBetweenPoints\":true,\"showCircles\":true}],\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"
 times\":[],\"addTimeMarker\":false,\"type\":\"histogram\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"source:type\",\"size\":20,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}","uiStateJSON":"{\"vis\":{\"legendOpen\":true,\"colors\":{\"yaf\":\"#CCA300\",\"snort\":\"#C15C17\",\"bro\":\"#F9934E\"}}}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":\"all-metron-index\",\"query\":{\"match_all\":{}},\"filter\":[{\"meta\":{\"index\":\"all-metron-index\",\"type\":\"phrases\",\"key\":\"source:type\",\"value\":\"bro, yaf, snort\",\"params\":[\"bro\",\"yaf\",\"snort\"],\"negate\":false,\"disabled\":false,\"alias\":null},\"query\":{\"bool\":{\"should\":[{\"match_phrase\":{\"source:type\":\"bro\"}},{\"match_phrase\":{\"source:type\":\"yaf\"}},{\"match_phrase\":{\"source:type\":\"snort\"}}],\"minimum_sh
 ould_match\":1}},\"$state\":{\"store\":\"appState\"}}]}"}}
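
Nearly every visualization in this batch pins the same Kibana "phrases" filter on source:type, so the panels only count telemetry from the three default sensors; the bool/should query with minimum_should_match of 1 is the standard expansion of that filter. A minimal sketch that rebuilds the clause for an arbitrary sensor list (source_type_filter is a hypothetical helper, not part of the commit):

    import json

    def source_type_filter(sensors, index="all-metron-index"):
        # Mirrors the filter embedded in the searchSourceJSON above.
        return {
            "meta": {"index": index, "type": "phrases", "key": "source:type",
                     "value": ", ".join(sensors), "params": sensors,
                     "negate": False, "disabled": False, "alias": None},
            "query": {"bool": {
                "should": [{"match_phrase": {"source:type": s}}
                           for s in sensors],
                "minimum_should_match": 1}},
            "$state": {"store": "appState"},
        }

    print(json.dumps(source_type_filter(["bro", "yaf", "snort"]), indent=2))
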
+{ "create" : { "_id": "AV-YyJw3PfR7HJex-ZdY", "_type": "visualization" } }
+{"title":"All index TS event count","visState":"{\"title\":\"All index TS event count\",\"type\":\"metrics\",\"params\":{\"id\":\"eac7cbe0-c411-11e7-a0b9-2137696bd057\",\"type\":\"metric\",\"series\":[{\"id\":\"eac7cbe1-c411-11e7-a0b9-2137696bd057\",\"color\":\"#68BC00\",\"split_mode\":\"everything\",\"metrics\":[{\"id\":\"eac7cbe2-c411-11e7-a0b9-2137696bd057\",\"type\":\"count\"}],\"seperate_axis\":0,\"axis_position\":\"right\",\"formatter\":\"number\",\"chart_type\":\"line\",\"line_width\":1,\"point_size\":1,\"fill\":0.5,\"stacked\":\"none\",\"label\":\"Event Count\",\"split_filters\":[{\"color\":\"#68BC00\",\"id\":\"89be23f0-c4af-11e7-ac01-25d5c1ff2e49\"}],\"series_drop_last_bucket\":0}],\"time_field\":\"timestamp\",\"index_pattern\":\"bro_index*,snort_index*,yaf_index*\",\"interval\":\"1y\",\"axis_position\":\"left\",\"axis_formatter\":\"number\",\"show_legend\":1,\"background_color_rules\":[{\"id\":\"022dc960-c412-11e7-a0b9-2137696bd057\"}],\"bar_color_rules\":[{\"id\":\"21ffb0
 f0-c412-11e7-a0b9-2137696bd057\"}],\"filter\":\"\",\"drop_last_bucket\":0},\"aggs\":[],\"listeners\":{}}","uiStateJSON":"{}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"query\":{\"match_all\":{}},\"filter\":[]}"}}
+{ "create" : { "_id": "AV-cBm5JFLIoshSSHghu", "_type": "visualization" } }
+{"title":"All index TS Chart","visState":"{\"title\":\"All index TS Chart\",\"type\":\"metrics\",\"params\":{\"id\":\"eac7cbe0-c411-11e7-a0b9-2137696bd057\",\"type\":\"timeseries\",\"series\":[{\"id\":\"eac7cbe1-c411-11e7-a0b9-2137696bd057\",\"color\":\"rgba(0,156,224,1)\",\"split_mode\":\"terms\",\"metrics\":[{\"id\":\"eac7cbe2-c411-11e7-a0b9-2137696bd057\",\"type\":\"count\"}],\"seperate_axis\":0,\"axis_position\":\"right\",\"formatter\":\"number\",\"chart_type\":\"bar\",\"line_width\":\"1\",\"point_size\":1,\"fill\":0.5,\"stacked\":\"stacked\",\"label\":\"Events\",\"terms_field\":\"source:type\",\"value_template\":\"{{value}}\"}],\"time_field\":\"timestamp\",\"index_pattern\":\"bro*,snort*,yaf*\",\"interval\":\"30s\",\"axis_position\":\"left\",\"axis_formatter\":\"number\",\"show_legend\":1,\"background_color_rules\":[{\"id\":\"022dc960-c412-11e7-a0b9-2137696bd057\"}],\"bar_color_rules\":[{\"id\":\"21ffb0f0-c412-11e7-a0b9-2137696bd057\"}],\"show_grid\":1,\"drop_last_bucket\":0},\
 "aggs\":[],\"listeners\":{}}","uiStateJSON":"{}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"query\":{\"match_all\":{}},\"filter\":[]}"}}
+{ "create" : { "_id": "AV-dXz9Lk7f2nZ-iH3Kb", "_type": "visualization" } }
+{"title":"Event Count Pie Chart","visState":"{\"title\":\"Event Count Pie Chart\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":false,\"type\":\"pie\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"Events by Source Type\"}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"source:type\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}","uiStateJSON":"{}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":\"all-metron-index\",\"query\":{\"match_all\":{}},\"filter\":[{\"meta\":{\"index\":\"all-metron-index\",\"type\":\"phrases\",\"key\":\"source:type\",\"value\":\"bro, snort, yaf\",\"params\":[\"bro\",\"snort\",\"yaf\"],\"negate\":false,\"disabled\":false,\"alias\":null},\"query\":{\"bool\":{\"should\":[{\"match_phrase\":{\"source:type\":\"bro\"}},{\"match_ph
 rase\":{\"source:type\":\"snort\"}},{\"match_phrase\":{\"source:type\":\"yaf\"}}],\"minimum_should_match\":1}},\"$state\":{\"store\":\"appState\"}}]}"}}
+{ "create" : { "_id": "AV-ddhh7k7f2nZ-iH3Kx", "_type": "visualization" } }
+{"title":"Flow Location Map","visState":"{\"title\":\"Flow Location Map\",\"type\":\"tile_map\",\"params\":{\"mapType\":\"Scaled Circle Markers\",\"isDesaturated\":true,\"addTooltip\":true,\"heatMaxZoom\":0,\"heatMinOpacity\":0.1,\"heatRadius\":25,\"heatBlur\":15,\"legendPosition\":\"bottomright\",\"mapZoom\":2,\"mapCenter\":[0,0],\"wms\":{\"enabled\":false,\"url\":\"https://basemap.nationalmap.gov/arcgis/services/USGSTopo/MapServer/WMSServer\",\"options\":{\"version\":\"1.3.0\",\"layers\":\"0\",\"format\":\"image/png\",\"transparent\":true,\"attribution\":\"Maps provided by USGS\",\"styles\":\"\"}},\"type\":\"tile_map\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"geohash_grid\",\"schema\":\"segment\",\"params\":{\"field\":\"enrichments:geo:ip_src_addr:location_point\",\"autoPrecision\":true,\"useGeocentroid\":true,\"precision\":2,\"customLabel\":\"Flow Source Locations\"}}],\"listeners\"
 :{}}","uiStateJSON":"{}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":\"all-metron-index\",\"query\":{\"match_all\":{}},\"filter\":[{\"meta\":{\"index\":\"all-metron-index\",\"type\":\"phrases\",\"key\":\"source:type\",\"value\":\"bro, snort, yaf\",\"params\":[\"bro\",\"snort\",\"yaf\"],\"negate\":false,\"disabled\":false,\"alias\":null},\"query\":{\"bool\":{\"should\":[{\"match_phrase\":{\"source:type\":\"bro\"}},{\"match_phrase\":{\"source:type\":\"snort\"}},{\"match_phrase\":{\"source:type\":\"yaf\"}}],\"minimum_should_match\":1}},\"$state\":{\"store\":\"appState\"}}]}"}}
+{ "create" : { "_id": "AV-dfk_gk7f2nZ-iH3K0", "_type": "visualization" } }
+{"title":"Events By Country","visState":"{\"title\":\"Events By Country\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":false,\"type\":\"pie\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"enrichments:geo:ip_src_addr:country\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}","uiStateJSON":"{\"spy\":{\"mode\":{\"name\":null,\"fill\":false}}}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":\"all-metron-index\",\"query\":{\"match_all\":{}},\"filter\":[{\"meta\":{\"index\":\"all-metron-index\",\"type\":\"phrases\",\"key\":\"source:type\",\"value\":\"bro, snort, yaf\",\"params\":[\"bro\",\"snort\",\"yaf\"],\"negate\":false,\"disabled\":false,\"alias\":null},\"query\":{\"bool\":{\"should\":[{\"match_phrase\":{\"source:type
 \":\"bro\"}},{\"match_phrase\":{\"source:type\":\"snort\"}},{\"match_phrase\":{\"source:type\":\"yaf\"}}],\"minimum_should_match\":1}},\"$state\":{\"store\":\"appState\"}}]}"}}
+{ "create" : { "_id": "AV-TUPlDgto7-W6O2b3n", "_type": "index-pattern" } }
+{"title":"yaf_index*","timeFieldName":"timestamp","notExpandable":true,"fields":"[{\"name\":\"_id\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_index\",\"type\":\"string\",\"count\":1,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"_score\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":false,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_source\",\"type\":\"_source\",\"count\":0,\"scripted\":false,\"searchable\":false,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"adapter:geoadapter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:geoadapter:end:t
 s\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:hostfromjsonlistadapter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:hostfromjsonlistadapter:end:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:threatinteladapter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:threatinteladapter:end:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"app\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dip\",\"type\":\"string\",\"count\":0,\"s
 cripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dp\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"duration\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"end-reason\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"end_reason\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"end_reason.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"end_time\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichmentjoinbolt:joiner:
 ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:city\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:country\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:dmaCode\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:latitude\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:locID\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichment
 s:geo:ip_dst_addr:location_point\",\"type\":\"geo_point\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:longitude\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:postalCode\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:city\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:country\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:dmaCode\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFro
 mDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:latitude\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:locID\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:location_point\",\"type\":\"geo_point\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:longitude\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:postalCode\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichmentsplitterbolt:splitter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"sea
 rchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichmentsplitterbolt:splitter:end:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"guid\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"guid.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"iflags\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ip_dst_addr\",\"type\":\"ip\",\"count\":1,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ip_dst_port\",\"type\":\"number\",\"count\":1,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ip_src_addr\",\"type\":\"i
 p\",\"count\":1,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ip_src_port\",\"type\":\"number\",\"count\":1,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"isn\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"oct\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"original_string\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"original_string.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"pkt\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"prot
 o\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"protocol\",\"type\":\"string\",\"count\":1,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"protocol.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"riflags\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"risn\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"roct\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"rpkt\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":
 \"rtag\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"rtt\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ruflags\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"sip\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"source:type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"sp\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"start_time\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"tag\"
 ,\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"threatinteljoinbolt:joiner:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"threatintelsplitterbolt:splitter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"threatintelsplitterbolt:splitter:end:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"timestamp\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"uflags\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true}]"}
+{ "create" : { "_id": "AV-eebabk7f2nZ-iH3L1", "_type": "visualization" } }
+{"title":"YAF Flow Duration","visState":"{\"title\":\"YAF Flow Duration\",\"type\":\"area\",\"params\":{\"grid\":{\"categoryLines\":false,\"style\":{\"color\":\"#eee\"}},\"categoryAxes\":[{\"id\":\"CategoryAxis-1\",\"type\":\"category\",\"position\":\"bottom\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\"},\"labels\":{\"show\":true,\"truncate\":100},\"title\":{\"text\":\"Flow Duration (seconds)\"}}],\"valueAxes\":[{\"id\":\"ValueAxis-1\",\"name\":\"LeftAxis-1\",\"type\":\"value\",\"position\":\"left\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\",\"mode\":\"normal\"},\"labels\":{\"show\":true,\"rotate\":0,\"filter\":false,\"truncate\":100},\"title\":{\"text\":\"Count\"}}],\"seriesParams\":[{\"show\":\"true\",\"type\":\"area\",\"mode\":\"stacked\",\"data\":{\"label\":\"Count\",\"id\":\"1\"},\"drawLinesBetweenPoints\":true,\"showCircles\":true,\"interpolate\":\"linear\",\"valueAxis\":\"ValueAxis-1\"}],\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"
 right\",\"times\":[],\"addTimeMarker\":false,\"type\":\"area\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"duration\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Flow Duration (seconds)\"}}],\"listeners\":{}}","uiStateJSON":"{\"vis\":{\"legendOpen\":false}}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":\"AV-TUPlDgto7-W6O2b3n\",\"query\":{\"match_all\":{}},\"filter\":[]}"}}
+{ "create" : { "_id": "AV-deDqXk7f2nZ-iH3Ky", "_type": "visualization" } }
+{"title":"Geo-IP Locations","visState":"{\"title\":\"Geo-IP Locations\",\"type\":\"metric\",\"params\":{\"addTooltip\":true,\"addLegend\":false,\"type\":\"gauge\",\"gauge\":{\"verticalSplit\":false,\"autoExtend\":false,\"percentageMode\":false,\"gaugeType\":\"Metric\",\"gaugeStyle\":\"Full\",\"backStyle\":\"Full\",\"orientation\":\"vertical\",\"colorSchema\":\"Green to Red\",\"gaugeColorMode\":\"None\",\"useRange\":false,\"colorsRange\":[{\"from\":0,\"to\":100}],\"invertColors\":false,\"labels\":{\"show\":false,\"color\":\"black\"},\"scale\":{\"show\":false,\"labels\":false,\"color\":\"#333\",\"width\":2},\"type\":\"simple\",\"style\":{\"fontSize\":\"60\",\"bgColor\":false,\"labelColor\":false,\"subText\":\"\"}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"cardinality\",\"schema\":\"metric\",\"params\":{\"field\":\"enrichments:geo:ip_src_addr:country\",\"customLabel\":\"Unique Location(s)\"}}],\"listeners\":{}}","uiStateJSON":"{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0
 ,104,55)\"}}}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":\"all-metron-index\",\"query\":{\"match_all\":{}},\"filter\":[{\"meta\":{\"index\":\"all-metron-index\",\"type\":\"phrases\",\"key\":\"source:type\",\"value\":\"bro, snort, yaf\",\"params\":[\"bro\",\"snort\",\"yaf\"],\"negate\":false,\"disabled\":false,\"alias\":null},\"query\":{\"bool\":{\"should\":[{\"match_phrase\":{\"source:type\":\"bro\"}},{\"match_phrase\":{\"source:type\":\"snort\"}},{\"match_phrase\":{\"source:type\":\"yaf\"}}],\"minimum_should_match\":1}},\"$state\":{\"store\":\"appState\"}}]}"}}
+{ "create" : { "_id": "AV-YvG0DPfR7HJex-ZaS", "_type": "visualization" } }
+{"title":"Event Count","visState":"{\"title\":\"Event Count\",\"type\":\"metric\",\"params\":{\"addLegend\":false,\"addTooltip\":true,\"gauge\":{\"autoExtend\":false,\"backStyle\":\"Full\",\"colorSchema\":\"Green to Red\",\"colorsRange\":[{\"from\":0,\"to\":100}],\"gaugeColorMode\":\"None\",\"gaugeStyle\":\"Full\",\"gaugeType\":\"Metric\",\"invertColors\":false,\"labels\":{\"color\":\"black\",\"show\":false},\"orientation\":\"vertical\",\"percentageMode\":false,\"scale\":{\"color\":\"#333\",\"labels\":false,\"show\":false,\"width\":2},\"style\":{\"bgColor\":false,\"fontSize\":\"60\",\"labelColor\":false,\"subText\":\"\",\"bgFill\":\"\"},\"type\":\"simple\",\"useRange\":false,\"verticalSplit\":false},\"type\":\"gauge\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"Event Count\"}}],\"listeners\":{}}","uiStateJSON":"{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}","description":"","version":1,"kibanaSavedO
 bjectMeta":{"searchSourceJSON":"{\"index\":\"all-metron-index\",\"query\":{\"match_all\":{}},\"filter\":[{\"meta\":{\"index\":\"all-metron-index\",\"type\":\"phrases\",\"key\":\"source:type\",\"value\":\"bro, snort, yaf\",\"params\":[\"bro\",\"snort\",\"yaf\"],\"negate\":false,\"disabled\":false,\"alias\":null},\"query\":{\"bool\":{\"should\":[{\"match_phrase\":{\"source:type\":\"bro\"}},{\"match_phrase\":{\"source:type\":\"snort\"}},{\"match_phrase\":{\"source:type\":\"yaf\"}}],\"minimum_should_match\":1}},\"$state\":{\"store\":\"appState\"}}]}"}}
+{ "create" : { "_id": "AV-ejKEdk7f2nZ-iH3MI", "_type": "visualization" } }
+{"title":"Web Requests","visState":"{\"title\":\"Web Requests\",\"type\":\"metric\",\"params\":{\"addTooltip\":true,\"addLegend\":false,\"type\":\"gauge\",\"gauge\":{\"verticalSplit\":false,\"autoExtend\":false,\"percentageMode\":false,\"gaugeType\":\"Metric\",\"gaugeStyle\":\"Full\",\"backStyle\":\"Full\",\"orientation\":\"vertical\",\"colorSchema\":\"Green to Red\",\"gaugeColorMode\":\"None\",\"useRange\":false,\"colorsRange\":[{\"from\":0,\"to\":100}],\"invertColors\":false,\"labels\":{\"show\":false,\"color\":\"black\"},\"scale\":{\"show\":false,\"labels\":false,\"color\":\"#333\",\"width\":2},\"type\":\"simple\",\"style\":{\"fontSize\":60,\"bgColor\":false,\"labelColor\":false,\"subText\":\"\"}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}}],\"listeners\":{}}","uiStateJSON":"{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":\"AV-S
 2e81hKs1cXXnFMqN\",\"query\":{\"match_all\":{}},\"filter\":[{\"meta\":{\"index\":\"AV-S2e81hKs1cXXnFMqN\",\"type\":\"phrases\",\"key\":\"protocol\",\"value\":\"http, https\",\"params\":[\"http\",\"https\"],\"negate\":false,\"disabled\":false,\"alias\":null},\"query\":{\"bool\":{\"should\":[{\"match_phrase\":{\"protocol\":\"http\"}},{\"match_phrase\":{\"protocol\":\"https\"}}],\"minimum_should_match\":1}},\"$state\":{\"store\":\"appState\"}}]}"}}
+{ "create" : { "_id": "AV-ejbG6k7f2nZ-iH3MJ", "_type": "visualization" } }
+{"title":"DNS Requests","visState":"{\"title\":\"DNS Requests\",\"type\":\"metric\",\"params\":{\"addTooltip\":true,\"addLegend\":false,\"type\":\"gauge\",\"gauge\":{\"verticalSplit\":false,\"autoExtend\":false,\"percentageMode\":false,\"gaugeType\":\"Metric\",\"gaugeStyle\":\"Full\",\"backStyle\":\"Full\",\"orientation\":\"vertical\",\"colorSchema\":\"Green to Red\",\"gaugeColorMode\":\"None\",\"useRange\":false,\"colorsRange\":[{\"from\":0,\"to\":100}],\"invertColors\":false,\"labels\":{\"show\":false,\"color\":\"black\"},\"scale\":{\"show\":false,\"labels\":false,\"color\":\"#333\",\"width\":2},\"type\":\"simple\",\"style\":{\"fontSize\":60,\"bgColor\":false,\"labelColor\":false,\"subText\":\"\"}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}}],\"listeners\":{}}","uiStateJSON":"{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":\"AV-S
 2e81hKs1cXXnFMqN\",\"query\":{\"match_all\":{}},\"filter\":[{\"meta\":{\"index\":\"AV-S2e81hKs1cXXnFMqN\",\"negate\":false,\"disabled\":false,\"alias\":null,\"type\":\"phrase\",\"key\":\"protocol\",\"value\":\"dns\"},\"query\":{\"match\":{\"protocol\":{\"query\":\"dns\",\"type\":\"phrase\"}}},\"$state\":{\"store\":\"appState\"}}]}"}}
+{ "create" : { "_id": "AV-eh5Wgk7f2nZ-iH3MG", "_type": "visualization" } }
+{"title":"Snort Alert Types","visState":"{\"title\":\"Snort Alert Types\",\"type\":\"metric\",\"params\":{\"addTooltip\":true,\"addLegend\":false,\"type\":\"gauge\",\"gauge\":{\"verticalSplit\":false,\"autoExtend\":false,\"percentageMode\":false,\"gaugeType\":\"Metric\",\"gaugeStyle\":\"Full\",\"backStyle\":\"Full\",\"orientation\":\"vertical\",\"colorSchema\":\"Green to Red\",\"gaugeColorMode\":\"None\",\"useRange\":false,\"colorsRange\":[{\"from\":0,\"to\":100}],\"invertColors\":false,\"labels\":{\"show\":false,\"color\":\"black\"},\"scale\":{\"show\":false,\"labels\":false,\"color\":\"#333\",\"width\":2},\"type\":\"simple\",\"style\":{\"fontSize\":60,\"bgColor\":false,\"labelColor\":false,\"subText\":\"\"}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"cardinality\",\"schema\":\"metric\",\"params\":{\"field\":\"sig_id\",\"customLabel\":\"Alert Type(s)\"}}],\"listeners\":{}}","uiStateJSON":"{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}","description":"","ver
 sion":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":\"AV-TAoyPhKs1cXXnFMqi\",\"query\":{\"match_all\":{}},\"filter\":[]}"}}
+{ "create" : { "_id": "AV-ecrFkk7f2nZ-iH3L0", "_type": "visualization" } }
+{"title":"Yaf Flows Count","visState":"{\"title\":\"Yaf Flows Count\",\"type\":\"metric\",\"params\":{\"addTooltip\":true,\"addLegend\":false,\"type\":\"gauge\",\"gauge\":{\"verticalSplit\":false,\"autoExtend\":false,\"percentageMode\":false,\"gaugeType\":\"Metric\",\"gaugeStyle\":\"Full\",\"backStyle\":\"Full\",\"orientation\":\"vertical\",\"colorSchema\":\"Green to Red\",\"gaugeColorMode\":\"None\",\"useRange\":false,\"colorsRange\":[{\"from\":0,\"to\":100}],\"invertColors\":false,\"labels\":{\"show\":false,\"color\":\"black\"},\"scale\":{\"show\":false,\"labels\":false,\"color\":\"#333\",\"width\":2},\"type\":\"simple\",\"style\":{\"fontSize\":60,\"bgColor\":false,\"labelColor\":false,\"subText\":\"\"}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}}],\"listeners\":{}}","uiStateJSON":"{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":
 \"AV-TUPlDgto7-W6O2b3n\",\"query\":{\"match_all\":{}},\"filter\":[]}"}}
+{ "create" : { "_id": "AV-ek_Jnk7f2nZ-iH3MK", "_type": "visualization" } }
+{"title":"Web Request Type","visState":"{\"title\":\"Web Request Type\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":false,\"type\":\"pie\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"method\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}","uiStateJSON":"{}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":\"AV-S2e81hKs1cXXnFMqN\",\"query\":{\"match_all\":{}},\"filter\":[{\"meta\":{\"index\":\"AV-S2e81hKs1cXXnFMqN\",\"type\":\"phrases\",\"key\":\"protocol\",\"value\":\"http, https\",\"params\":[\"http\",\"https\"],\"negate\":false,\"disabled\":false,\"alias\":null},\"query\":{\"bool\":{\"should\":[{\"match_phrase\":{\"protocol\":\"http\"}},{\"match_phrase\":{\"protocol\":\"https\"}}],\"minimum_should_match\":1}},\
 "$state\":{\"store\":\"appState\"}}]}"}}
+{ "create" : { "_id": "AV-S2e81hKs1cXXnFMqN", "_type": "index-pattern" } }
+{"title":"bro_index*","timeFieldName":"timestamp","notExpandable":true,"fields":"[{\"name\":\"AA\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"RA\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"RD\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"TC\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"TTLs\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"Z\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"_id\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\
 ":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_index\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"_score\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":false,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_source\",\"type\":\"_source\",\"count\":0,\"scripted\":false,\"searchable\":false,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"actions\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:geoadapter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:geoadapter:end:ts\",\"type\":\"date\"
 ,\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:hostfromjsonlistadapter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:hostfromjsonlistadapter:end:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:threatinteladapter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:threatinteladapter:end:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"addl\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"analyzer\",\"type\":\"string\",\"count\":0,\"scripted\":false
 ,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"analyzers\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"answers\",\"type\":\"string\",\"count\":1,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"arg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"assigned_ip\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"auth_attempts\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"auth_success\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"basic_constraints:ca\",\"type\":\"boolean\"
 ,\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"basic_constraints:path_len\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"bro_timestamp\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"bro_timestamp.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"capture_password\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:curve\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:exponent\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatabl
 e\":true,\"readFromDocValues\":true},{\"name\":\"certificate:issuer\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:key_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:key_length\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:key_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:not_valid_after\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:not_valid_before\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":
 \"certificate:serial\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:sig_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:subject\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:version\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"cipher\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"cipher_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"client\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggreg
 atable\":true,\"readFromDocValues\":true},{\"name\":\"command\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"compression_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"conn_state\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"conn_uids\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"connect_info\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"curve\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"cwd\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchabl
 e\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"data_channel:orig_h\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"data_channel:passive\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"data_channel:resp_h\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"data_channel:resp_p\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"date\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"depth\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dhcp_host_name\",\"type\":\"st
 ring\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"direction\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dropped\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dst\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"duration\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichmentjoinbolt:joiner:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:city\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues
 \":true},{\"name\":\"enrichments:geo:ip_dst_addr:country\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:dmaCode\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:latitude\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:locID\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:location_point\",\"type\":\"geo_point\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:longitude\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\
 "aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:postalCode\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichmentsplitterbolt:splitter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichmentsplitterbolt:splitter:end:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"established\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"failure_reason\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"file_desc\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDoc
 Values\":true},{\"name\":\"file_mime_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"file_size\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"filename\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"first_received\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"from\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"fuid\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"fuids\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":tru
 e,\"readFromDocValues\":true},{\"name\":\"guid\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"guid.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"helo\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"history\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"host\",\"type\":\"string\",\"count\":2,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"host_key\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"host_key_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregata
 ble\":true,\"readFromDocValues\":true},{\"name\":\"host_p\

<TRUNCATED>

[05/50] [abbrv] metron git commit: METRON-1397 Support for JSON Path and complex documents in JSONMapParser closes apache/incubator-metron#914

Posted by rm...@apache.org.
METRON-1397 Support for JSON Path and complex documents in JSONMapParser closes apache/incubator-metron#914


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/9c5d9d76
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/9c5d9d76
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/9c5d9d76

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 9c5d9d76644fc07bae36644906f52e0422f33d0e
Parents: 85d1247
Author: ottobackwards <ot...@gmail.com>
Authored: Thu Mar 15 14:17:31 2018 -0400
Committer: cstella <ce...@gmail.com>
Committed: Thu Mar 15 14:17:31 2018 -0400

----------------------------------------------------------------------
 dependencies_with_url.csv                       |   3 +
 .../docker/rpm-docker/SPECS/metron.spec         |   1 +
 .../jsonMapQuery/parsed/jsonMapExampleParsed    |   2 +
 .../data/jsonMapQuery/raw/jsonMapExampleOutput  |   1 +
 metron-platform/metron-parsers/README.md        |  12 ++
 metron-platform/metron-parsers/pom.xml          |   5 +
 .../config/zookeeper/parsers/jsonMapQuery.json  |   5 +
 .../metron/parsers/json/JSONMapParser.java      | 145 +++++++++----
 .../JSONMapQueryIntegrationTest.java            |  36 ++++
 .../validation/SampleDataValidation.java        |   2 +-
 .../parsers/json/JSONMapParserQueryTest.java    | 201 +++++++++++++++++++
 .../metron/test/utils/ValidationUtils.java      |  46 ++++-
 12 files changed, 406 insertions(+), 53 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/9c5d9d76/dependencies_with_url.csv
----------------------------------------------------------------------
diff --git a/dependencies_with_url.csv b/dependencies_with_url.csv
index e2b947b..1e73eb1 100644
--- a/dependencies_with_url.csv
+++ b/dependencies_with_url.csv
@@ -22,6 +22,9 @@ com.flipkart.zjsonpatch:zjsonpatch:jar:0.3.4:compile,Apache v2, https://github.c
 com.google.protobuf:protobuf-java:jar:2.5.0:compile,New BSD license,http://code.google.com/p/protobuf
 com.google.protobuf:protobuf-java:jar:2.6.1:compile,New BSD license,http://code.google.com/p/protobuf
 com.jcraft:jsch:jar:0.1.42:compile,BSD,http://www.jcraft.com/jsch/
+com.jayway.jsonpath:json-path:jar:2.3.0:compile,Apache v2,https://github.com/json-path/JsonPath
+net.minidev:accessors-smart:jar:1.2:compile,Apache v2,https://github.com/netplex/json-smart-v2
+net.minidev:json-smart:jar:2.3:compile,Apache v2,https://github.com/netplex/json-smart-v2
 com.maxmind.db:maxmind-db:jar:1.2.1:compile,CC-BY-SA 3.0,https://github.com/maxmind/MaxMind-DB
 com.maxmind.geoip2:geoip2:jar:2.8.0:compile,Apache v2,https://github.com/maxmind/GeoIP2-java
 com.sun.xml.bind:jaxb-impl:jar:2.2.3-1:compile,CDDL,http://jaxb.java.net/

http://git-wip-us.apache.org/repos/asf/metron/blob/9c5d9d76/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec b/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec
index 265d595..cc01d7c 100644
--- a/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec
+++ b/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec
@@ -147,6 +147,7 @@ This package installs the Metron Parser files
 %{metron_home}/bin/start_parser_topology.sh
 %{metron_home}/config/zookeeper/parsers/bro.json
 %{metron_home}/config/zookeeper/parsers/jsonMap.json
+%{metron_home}/config/zookeeper/parsers/jsonMapQuery.json
 %{metron_home}/config/zookeeper/parsers/snort.json
 %{metron_home}/config/zookeeper/parsers/squid.json
 %{metron_home}/config/zookeeper/parsers/websphere.json

http://git-wip-us.apache.org/repos/asf/metron/blob/9c5d9d76/metron-platform/metron-integration-test/src/main/sample/data/jsonMapQuery/parsed/jsonMapExampleParsed
----------------------------------------------------------------------
diff --git a/metron-platform/metron-integration-test/src/main/sample/data/jsonMapQuery/parsed/jsonMapExampleParsed b/metron-platform/metron-integration-test/src/main/sample/data/jsonMapQuery/parsed/jsonMapExampleParsed
new file mode 100644
index 0000000..e614bda
--- /dev/null
+++ b/metron-platform/metron-integration-test/src/main/sample/data/jsonMapQuery/parsed/jsonMapExampleParsed
@@ -0,0 +1,2 @@
+{ "string" : "bar", "number" : 2, "ignored" : [ "blah" ], "original_string":"{ \"string\" : \"bar\", \"number\" : 2, \"ignored\" : [ \"blah\" ] }","timestamp":1000000000000, "source.type":"jsonMapQuery","guid":"this-is-random-uuid-will-be-36-chars" }
+{ "number" : 7 , "original_string" : "{ \"number\" : 7 }", "source.type":"jsonMapQuery","timestamp":1000000000000,"guid":"this-is-random-uuid-will-be-36-chars"}

http://git-wip-us.apache.org/repos/asf/metron/blob/9c5d9d76/metron-platform/metron-integration-test/src/main/sample/data/jsonMapQuery/raw/jsonMapExampleOutput
----------------------------------------------------------------------
diff --git a/metron-platform/metron-integration-test/src/main/sample/data/jsonMapQuery/raw/jsonMapExampleOutput b/metron-platform/metron-integration-test/src/main/sample/data/jsonMapQuery/raw/jsonMapExampleOutput
new file mode 100644
index 0000000..8f25f4f
--- /dev/null
+++ b/metron-platform/metron-integration-test/src/main/sample/data/jsonMapQuery/raw/jsonMapExampleOutput
@@ -0,0 +1 @@
+{"foo":[{ "string" : "bar", "number" : 2, "ignored" : [ "blah" ] },{ "number" : 7 }]}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/metron/blob/9c5d9d76/metron-platform/metron-parsers/README.md
----------------------------------------------------------------------
diff --git a/metron-platform/metron-parsers/README.md b/metron-platform/metron-parsers/README.md
index ade0f51..3d9fdfe 100644
--- a/metron-platform/metron-parsers/README.md
+++ b/metron-platform/metron-parsers/README.md
@@ -43,6 +43,7 @@ There are two general types of parsers:
       * `UNFOLD` : Unfold inner maps.  So `{ "foo" : { "bar" : 1} }` would turn into `{"foo.bar" : 1}`
       * `ALLOW` : Allow multidimensional maps
       * `ERROR` : Throw an error when a multidimensional map is encountered
+    * `jsonpQuery` : A [JSON Path](#json-path) query string. If present, the result of the JSON Path query should be a list of messages. This is useful if you have a JSON document which contains a list or array of messages embedded in it, and you do not have another means of splitting the message (see the sketch following this diff).
     * A field called `timestamp` is expected to exist and, if it does not, then current time is inserted.  
     
 ## Parser Architecture
@@ -520,3 +521,14 @@ be customized by modifying the arguments sent to this utility.
  
 Finally, if workers and executors are new to you, the following might be of use to you:
 * [Understanding the Parallelism of a Storm Topology](http://www.michael-noll.com/blog/2012/10/16/understanding-the-parallelism-of-a-storm-topology/)
+
+## JSON Path
+
+> "JSONPath expressions always refer to a JSON structure in the same way as XPath expression are used in combination with an XML document."
+> ~ Stefan Goessner
+
+
+- [JSON Path concept](http://goessner.net/articles/JsonPath/)
+- [Read about JSON Path library Apache Metron uses](https://github.com/json-path/JsonPath)
+- [Try JSON Path expressions online](http://jsonpath.herokuapp.com)
+

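To make the new `jsonpQuery` option concrete, here is a minimal illustrative sketch (the class name is hypothetical) that runs the same `$.foo` query through the json-path library added above, using the library's default provider; the input mirrors the sample data file:

import com.jayway.jsonpath.JsonPath;

import java.util.List;
import java.util.Map;

public class JsonpQuerySketch {
  public static void main(String[] args) {
    // A JSON document with a list of messages embedded under "foo".
    String doc = "{\"foo\":[{ \"string\" : \"bar\", \"number\" : 2 },{ \"number\" : 7 }]}";
    // "$.foo" selects that list; the parser emits one message per element.
    List<Map<String, Object>> messages = JsonPath.read(doc, "$.foo");
    System.out.println(messages.size());               // 2
    System.out.println(messages.get(0).get("string")); // bar
  }
}

Each element of the query result is treated as a standalone message, which is why the parsed sample file above contains two records.
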
http://git-wip-us.apache.org/repos/asf/metron/blob/9c5d9d76/metron-platform/metron-parsers/pom.xml
----------------------------------------------------------------------
diff --git a/metron-platform/metron-parsers/pom.xml b/metron-platform/metron-parsers/pom.xml
index f856654..c481864 100644
--- a/metron-platform/metron-parsers/pom.xml
+++ b/metron-platform/metron-parsers/pom.xml
@@ -256,6 +256,11 @@
             <version>2.2.6</version>
             <scope>test</scope>
         </dependency>
+        <dependency>
+            <groupId>com.jayway.jsonpath</groupId>
+            <artifactId>json-path</artifactId>
+            <version>2.3.0</version>
+        </dependency>
     </dependencies>
     <build>
         <plugins>

http://git-wip-us.apache.org/repos/asf/metron/blob/9c5d9d76/metron-platform/metron-parsers/src/main/config/zookeeper/parsers/jsonMapQuery.json
----------------------------------------------------------------------
diff --git a/metron-platform/metron-parsers/src/main/config/zookeeper/parsers/jsonMapQuery.json b/metron-platform/metron-parsers/src/main/config/zookeeper/parsers/jsonMapQuery.json
new file mode 100644
index 0000000..7dad779
--- /dev/null
+++ b/metron-platform/metron-parsers/src/main/config/zookeeper/parsers/jsonMapQuery.json
@@ -0,0 +1,5 @@
+{
+  "parserClassName":"org.apache.metron.parsers.json.JSONMapParser",
+  "sensorTopic":"jsonMapQuery",
+  "parserConfig": {"jsonpQuery":"$.foo"}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/metron/blob/9c5d9d76/metron-platform/metron-parsers/src/main/java/org/apache/metron/parsers/json/JSONMapParser.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-parsers/src/main/java/org/apache/metron/parsers/json/JSONMapParser.java b/metron-platform/metron-parsers/src/main/java/org/apache/metron/parsers/json/JSONMapParser.java
index 7e5468f..bddf35d 100644
--- a/metron-platform/metron-parsers/src/main/java/org/apache/metron/parsers/json/JSONMapParser.java
+++ b/metron-platform/metron-parsers/src/main/java/org/apache/metron/parsers/json/JSONMapParser.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,65 +15,116 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.metron.parsers.json;
 
 import com.google.common.base.Joiner;
-import com.google.common.collect.ImmutableList;
-import org.apache.metron.common.utils.JSONUtils;
-import org.apache.metron.parsers.BasicParser;
-import org.json.simple.JSONObject;
-
+import com.jayway.jsonpath.Configuration;
+import com.jayway.jsonpath.JsonPath;
+import com.jayway.jsonpath.Option;
+import com.jayway.jsonpath.TypeRef;
+import com.jayway.jsonpath.spi.cache.CacheProvider;
+import com.jayway.jsonpath.spi.cache.LRUCache;
+import com.jayway.jsonpath.spi.json.JacksonJsonProvider;
+import com.jayway.jsonpath.spi.json.JsonProvider;
+import com.jayway.jsonpath.spi.mapper.JacksonMappingProvider;
+import com.jayway.jsonpath.spi.mapper.MappingProvider;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.metron.common.utils.JSONUtils;
+import org.apache.metron.parsers.BasicParser;
+import org.json.simple.JSONObject;
 
 public class JSONMapParser extends BasicParser {
-  private static interface Handler {
+
+  private interface Handler {
+
     JSONObject handle(String key, Map value, JSONObject obj);
   }
-  public static enum MapStrategy implements Handler {
-     DROP((key, value, obj) -> obj)
-    ,UNFOLD( (key, value, obj) -> {
-      return recursiveUnfold(key,value,obj);
-    })
-    ,ALLOW((key, value, obj) -> {
+
+  @SuppressWarnings("unchecked")
+  public enum MapStrategy implements Handler {
+    DROP((key, value, obj) -> obj), UNFOLD((key, value, obj) -> {
+      return recursiveUnfold(key, value, obj);
+    }), ALLOW((key, value, obj) -> {
       obj.put(key, value);
       return obj;
-    })
-    ,ERROR((key, value, obj) -> {
-      throw new IllegalStateException("Unable to process " + key + " => " + value + " because value is a map.");
-    })
-    ;
+    }), ERROR((key, value, obj) -> {
+      throw new IllegalStateException(
+          "Unable to process " + key + " => " + value + " because value is a map.");
+    });
     Handler handler;
+
     MapStrategy(Handler handler) {
       this.handler = handler;
     }
 
-    private static JSONObject recursiveUnfold(String key, Map value, JSONObject obj){
+    @SuppressWarnings("unchecked")
+    private static JSONObject recursiveUnfold(String key, Map value, JSONObject obj) {
       Set<Map.Entry<Object, Object>> entrySet = value.entrySet();
-      for(Map.Entry<Object, Object> kv : entrySet) {
+      for (Map.Entry<Object, Object> kv : entrySet) {
         String newKey = Joiner.on(".").join(key, kv.getKey().toString());
-        if(kv.getValue() instanceof Map){
-          recursiveUnfold(newKey,(Map)kv.getValue(),obj);
-        }else {
+        if (kv.getValue() instanceof Map) {
+          recursiveUnfold(newKey, (Map) kv.getValue(), obj);
+        } else {
           obj.put(newKey, kv.getValue());
         }
       }
       return obj;
     }
+
     @Override
     public JSONObject handle(String key, Map value, JSONObject obj) {
       return handler.handle(key, value, obj);
     }
 
   }
+
   public static final String MAP_STRATEGY_CONFIG = "mapStrategy";
+  public static final String JSONP_QUERY = "jsonpQuery";
+
   private MapStrategy mapStrategy = MapStrategy.DROP;
+  private TypeRef<List<Map<String, Object>>> typeRef = new TypeRef<List<Map<String, Object>>>() {
+  };
+  private String jsonpQuery = null;
+
 
   @Override
   public void configure(Map<String, Object> config) {
     String strategyStr = (String) config.getOrDefault(MAP_STRATEGY_CONFIG, MapStrategy.DROP.name());
     mapStrategy = MapStrategy.valueOf(strategyStr);
+    if (config.containsKey(JSONP_QUERY)) {
+      jsonpQuery = (String) config.get(JSONP_QUERY);
+      Configuration.setDefaults(new Configuration.Defaults() {
+
+        private final JsonProvider jsonProvider = new JacksonJsonProvider();
+        private final MappingProvider mappingProvider = new JacksonMappingProvider();
+
+        @Override
+        public JsonProvider jsonProvider() {
+          return jsonProvider;
+        }
+
+        @Override
+        public MappingProvider mappingProvider() {
+          return mappingProvider;
+        }
+
+        @Override
+        public Set<Option> options() {
+          return EnumSet.of(Option.SUPPRESS_EXCEPTIONS);
+        }
+      });
+
+      if (CacheProvider.getCache() == null) {
+        CacheProvider.setCache(new LRUCache(100));
+      }
+    }
   }
 
   /**
@@ -87,22 +138,36 @@ public class JSONMapParser extends BasicParser {
   /**
    * Take raw data and convert it to a list of messages.
    *
-   * @param rawMessage
    * @return If null is returned, this is treated as an empty list.
    */
   @Override
+  @SuppressWarnings("unchecked")
   public List<JSONObject> parse(byte[] rawMessage) {
     try {
       String originalString = new String(rawMessage);
-      //convert the JSON blob into a String -> Object map
-      Map<String, Object> rawMap = JSONUtils.INSTANCE.load(originalString, JSONUtils.MAP_SUPPLIER);
-      JSONObject ret = normalizeJSON(rawMap);
-      ret.put("original_string", originalString );
-      if(!ret.containsKey("timestamp")) {
-        //we have to ensure that we have a timestamp.  This is one of the pre-requisites for the parser.
-        ret.put("timestamp", System.currentTimeMillis());
+      List<Map<String, Object>> messages = new ArrayList<>();
+
+      if (!StringUtils.isEmpty(jsonpQuery)) {
+        Object parsedObject = JsonPath.parse(new String(rawMessage)).read(jsonpQuery, typeRef);
+        if(parsedObject != null) {
+          messages.addAll((List<Map<String,Object>>)parsedObject);
+        }
+      } else {
+        messages.add(JSONUtils.INSTANCE.load(originalString, JSONUtils.MAP_SUPPLIER));
+      }
+
+      ArrayList<JSONObject> parsedMessages = new ArrayList<>();
+      for (Map<String, Object> rawMessageMap : messages) {
+        JSONObject originalJsonObject = new JSONObject(rawMessageMap);
+        JSONObject ret = normalizeJson(rawMessageMap);
+        // the original string is the original for THIS sub message
+        ret.put("original_string", originalJsonObject.toJSONString());
+        if (!ret.containsKey("timestamp")) {
+          ret.put("timestamp", System.currentTimeMillis());
+        }
+        parsedMessages.add(ret);
       }
-      return ImmutableList.of(ret);
+      return Collections.unmodifiableList(parsedMessages);
     } catch (Throwable e) {
       String message = "Unable to parse " + new String(rawMessage) + ": " + e.getMessage();
       LOG.error(message, e);
@@ -111,18 +176,16 @@ public class JSONMapParser extends BasicParser {
   }
 
   /**
-   * Process all sub-maps via the MapHandler.  We have standardized on one-dimensional maps as our data model..
-   *
-   * @param map
-   * @return
+   * Process all sub-maps via the MapHandler.
+   * We have standardized on one-dimensional maps as our data model.
    */
-  private JSONObject normalizeJSON(Map<String, Object> map) {
+  @SuppressWarnings("unchecked")
+  private JSONObject normalizeJson(Map<String, Object> map) {
     JSONObject ret = new JSONObject();
-    for(Map.Entry<String, Object> kv : map.entrySet()) {
-      if(kv.getValue() instanceof Map) {
+    for (Map.Entry<String, Object> kv : map.entrySet()) {
+      if (kv.getValue() instanceof Map) {
         mapStrategy.handle(kv.getKey(), (Map) kv.getValue(), ret);
-      }
-      else {
+      } else {
         ret.put(kv.getKey(), kv.getValue());
       }
     }

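For reference, a minimal usage sketch of the reworked parser (class name and input are illustrative), combining a `jsonpQuery` with the `UNFOLD` map strategy:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.metron.parsers.json.JSONMapParser;
import org.json.simple.JSONObject;

public class ParserUsageSketch {
  public static void main(String[] args) {
    JSONMapParser parser = new JSONMapParser();
    Map<String, Object> config = new HashMap<>();
    config.put(JSONMapParser.JSONP_QUERY, "$.foo");          // split on the embedded list
    config.put(JSONMapParser.MAP_STRATEGY_CONFIG, "UNFOLD"); // flatten nested maps to dotted keys
    parser.configure(config);

    byte[] raw = "{\"foo\":[{\"a\":{\"b\":1}},{\"a\":{\"b\":2}}]}".getBytes();
    List<JSONObject> messages = parser.parse(raw);
    // Two messages; each carries "a.b" plus the generated "original_string" and "timestamp" fields.
    System.out.println(messages.size());
  }
}
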
http://git-wip-us.apache.org/repos/asf/metron/blob/9c5d9d76/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/integration/JSONMapQueryIntegrationTest.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/integration/JSONMapQueryIntegrationTest.java b/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/integration/JSONMapQueryIntegrationTest.java
new file mode 100644
index 0000000..cec6242
--- /dev/null
+++ b/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/integration/JSONMapQueryIntegrationTest.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.parsers.integration;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.metron.parsers.integration.validation.SampleDataValidation;
+
+public class JSONMapQueryIntegrationTest extends ParserIntegrationTest {
+  @Override
+  String getSensorType() {
+    return "jsonMapQuery";
+  }
+
+  @Override
+  List<ParserValidation> getValidations() {
+    return new ArrayList<ParserValidation>() {{
+      add(new SampleDataValidation());
+    }};
+  }
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/9c5d9d76/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/integration/validation/SampleDataValidation.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/integration/validation/SampleDataValidation.java b/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/integration/validation/SampleDataValidation.java
index 9ea9b71..1dff22f 100644
--- a/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/integration/validation/SampleDataValidation.java
+++ b/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/integration/validation/SampleDataValidation.java
@@ -41,7 +41,7 @@ public class SampleDataValidation implements ParserValidation {
       String expectedMessage = new String(expectedMessages.get(i));
       String actualMessage = new String(actualMessages.get(i));
       try {
-        ValidationUtils.assertJSONEqual(expectedMessage, actualMessage);
+        ValidationUtils.assertJsonEqual(expectedMessage, actualMessage);
       } catch (Throwable t) {
         System.out.println("expected: " + expectedMessage);
         System.out.println("actual: " + actualMessage);

http://git-wip-us.apache.org/repos/asf/metron/blob/9c5d9d76/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/json/JSONMapParserQueryTest.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/json/JSONMapParserQueryTest.java b/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/json/JSONMapParserQueryTest.java
new file mode 100644
index 0000000..9f8c26b
--- /dev/null
+++ b/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/json/JSONMapParserQueryTest.java
@@ -0,0 +1,201 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.parsers.json;
+
+import com.google.common.collect.ImmutableMap;
+import java.util.HashMap;
+import java.util.List;
+import org.adrianwalker.multilinestring.Multiline;
+import org.apache.log4j.Level;
+import org.apache.metron.parsers.BasicParser;
+import org.apache.metron.test.utils.UnitTestHelper;
+import org.json.simple.JSONObject;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class JSONMapParserQueryTest {
+
+  /**
+   * {
+   * "foo" :
+   * [
+   * { "name" : "foo1", "value" : "bar", "number" : 1.0 },
+   * { "name" : "foo2", "value" : "baz", "number" : 2.0 }
+   * ]
+   * }
+   */
+  @Multiline
+  static String JSON_LIST;
+
+  /**
+   * { "name" : "foo1", "value" : "bar", "number" : 1.0 }
+   */
+  @Multiline
+  static String JSON_SINGLE;
+
+  /**
+   * { "name" : "foo2", "value" : "baz", "number" : 2.0 }
+   */
+  @Multiline
+  static String JSON_SINGLE2;
+
+  @Test
+  public void testHappyPath() {
+    JSONMapParser parser = new JSONMapParser();
+    parser.configure(new HashMap<String, Object>() {{
+      put(JSONMapParser.JSONP_QUERY, "$.foo");
+    }});
+    List<JSONObject> output = parser.parse(JSON_LIST.getBytes());
+    Assert.assertEquals(output.size(), 2);
+    //don't forget the timestamp field!
+    Assert.assertEquals(output.get(0).size(), 5);
+    JSONObject message = output.get(0);
+    Assert.assertEquals("foo1", message.get("name"));
+    Assert.assertEquals("bar", message.get("value"));
+    Assert.assertEquals(1.0, message.get("number"));
+    Assert.assertNotNull(message.get("timestamp"));
+    Assert.assertTrue(message.get("timestamp") instanceof Number);
+    Assert.assertNotNull(message.get("number"));
+    Assert.assertTrue(message.get("number") instanceof Number);
+
+    message = output.get(1);
+    Assert.assertEquals("foo2", message.get("name"));
+    Assert.assertEquals("baz", message.get("value"));
+    Assert.assertEquals(2.0, message.get("number"));
+    Assert.assertNotNull(message.get("timestamp"));
+    Assert.assertTrue(message.get("timestamp") instanceof Number);
+    Assert.assertNotNull(message.get("number"));
+    Assert.assertTrue(message.get("number") instanceof Number);
+
+  }
+
+  @Test(expected = IllegalStateException.class)
+  public void testInvalidJSONPathThrows() {
+    JSONMapParser parser = new JSONMapParser();
+    parser.configure(new HashMap<String, Object>() {{
+      put(JSONMapParser.JSONP_QUERY, "$$..$$SDSE$#$#.");
+    }});
+    List<JSONObject> output = parser.parse(JSON_LIST.getBytes());
+
+  }
+
+  @Test
+  public void testNoMatchesNoExceptions() {
+    JSONMapParser parser = new JSONMapParser();
+    parser.configure(new HashMap<String, Object>() {{
+      put(JSONMapParser.JSONP_QUERY, "$.foo");
+    }});
+    List<JSONObject> output = parser.parse(JSON_SINGLE.getBytes());
+    Assert.assertEquals(0, output.size());
+  }
+
+  /**
+   * {
+   * "foo" :
+   * [
+   * {
+   * "collection" : { "blah" : 7, "blah2" : "foo", "bigblah" : { "innerBlah" : "baz", "reallyInnerBlah" : { "color" : "grey" }}}
+   * },
+   * {
+   * "collection" : { "blah" : 8, "blah2" : "bar", "bigblah" : { "innerBlah" : "baz2", "reallyInnerBlah" : { "color" : "blue" }}}
+   * }
+   * ]
+   * }
+   */
+  @Multiline
+  static String collectionHandlingJSON;
+
+  @Test
+  public void testCollectionHandlingDrop() {
+    JSONMapParser parser = new JSONMapParser();
+    parser.configure(new HashMap<String, Object>() {{
+      put(JSONMapParser.JSONP_QUERY, "$.foo");
+    }});
+    List<JSONObject> output = parser.parse(collectionHandlingJSON.getBytes());
+    Assert.assertEquals(output.size(), 2);
+
+    //don't forget the timestamp field!
+    Assert.assertEquals(output.get(0).size(), 2);
+
+    JSONObject message = output.get(0);
+    Assert.assertNotNull(message.get("timestamp"));
+    Assert.assertTrue(message.get("timestamp") instanceof Number);
+
+    message = output.get(1);
+    Assert.assertNotNull(message.get("timestamp"));
+    Assert.assertTrue(message.get("timestamp") instanceof Number);
+  }
+
+  @Test(expected = IllegalStateException.class)
+  public void testCollectionHandlingError() {
+    JSONMapParser parser = new JSONMapParser();
+    parser.configure(ImmutableMap
+        .of(JSONMapParser.MAP_STRATEGY_CONFIG, JSONMapParser.MapStrategy.ERROR.name(),
+            JSONMapParser.JSONP_QUERY, "$.foo"));
+    UnitTestHelper.setLog4jLevel(BasicParser.class, Level.FATAL);
+    parser.parse(collectionHandlingJSON.getBytes());
+    UnitTestHelper.setLog4jLevel(BasicParser.class, Level.ERROR);
+  }
+
+
+  @Test
+  public void testCollectionHandlingAllow() {
+    JSONMapParser parser = new JSONMapParser();
+    parser.configure(ImmutableMap
+        .of(JSONMapParser.MAP_STRATEGY_CONFIG, JSONMapParser.MapStrategy.ALLOW.name(),
+            JSONMapParser.JSONP_QUERY, "$.foo"));
+    List<JSONObject> output = parser.parse(collectionHandlingJSON.getBytes());
+    Assert.assertEquals(output.size(), 2);
+    Assert.assertEquals(output.get(0).size(), 3);
+    JSONObject message = output.get(0);
+    Assert.assertNotNull(message.get("timestamp"));
+    Assert.assertTrue(message.get("timestamp") instanceof Number);
+
+    Assert.assertEquals(output.get(1).size(), 3);
+    message = output.get(1);
+    Assert.assertNotNull(message.get("timestamp"));
+    Assert.assertTrue(message.get("timestamp") instanceof Number);
+  }
+
+  @Test
+  public void testCollectionHandlingUnfold() {
+    JSONMapParser parser = new JSONMapParser();
+    parser.configure(ImmutableMap
+        .of(JSONMapParser.MAP_STRATEGY_CONFIG, JSONMapParser.MapStrategy.UNFOLD.name(),
+            JSONMapParser.JSONP_QUERY, "$.foo"));
+    List<JSONObject> output = parser.parse(collectionHandlingJSON.getBytes());
+    Assert.assertEquals(output.size(), 2);
+    Assert.assertEquals(output.get(0).size(), 6);
+    JSONObject message = output.get(0);
+    Assert.assertEquals(message.get("collection.blah"), 7);
+    Assert.assertEquals(message.get("collection.blah2"), "foo");
+    Assert.assertEquals(message.get("collection.bigblah.innerBlah"), "baz");
+    Assert.assertEquals(message.get("collection.bigblah.reallyInnerBlah.color"), "grey");
+    Assert.assertNotNull(message.get("timestamp"));
+    Assert.assertTrue(message.get("timestamp") instanceof Number);
+
+    Assert.assertEquals(output.get(1).size(), 6);
+    message = output.get(1);
+    Assert.assertEquals(message.get("collection.blah"), 8);
+    Assert.assertEquals(message.get("collection.blah2"), "bar");
+    Assert.assertEquals(message.get("collection.bigblah.innerBlah"), "baz2");
+    Assert.assertEquals(message.get("collection.bigblah.reallyInnerBlah.color"), "blue");
+    Assert.assertNotNull(message.get("timestamp"));
+    Assert.assertTrue(message.get("timestamp") instanceof Number);
+  }
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/9c5d9d76/metron-platform/metron-test-utilities/src/main/java/org/apache/metron/test/utils/ValidationUtils.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-test-utilities/src/main/java/org/apache/metron/test/utils/ValidationUtils.java b/metron-platform/metron-test-utilities/src/main/java/org/apache/metron/test/utils/ValidationUtils.java
index 279caa3..98fd258 100644
--- a/metron-platform/metron-test-utilities/src/main/java/org/apache/metron/test/utils/ValidationUtils.java
+++ b/metron-platform/metron-test-utilities/src/main/java/org/apache/metron/test/utils/ValidationUtils.java
@@ -7,7 +7,7 @@
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
  *
- *     http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -15,33 +15,57 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.metron.test.utils;
 
-import org.codehaus.jackson.map.ObjectMapper;
-import org.junit.Assert;
+package org.apache.metron.test.utils;
 
 import java.io.IOException;
 import java.util.Map;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.Assert;
 
 public class ValidationUtils {
 
-  public static void assertJSONEqual(String expected, String actual) throws IOException {
+  /**
+   * Validates that two JSON Strings are equal in value.
+   * Since JSON does not guarantee the order of fields, we cannot just compare Strings.
+   * <p>
+   * This utility understands that the 'original_string' field may itself hold JSON,
+   * and will attempt to validate that field as JSON if a straight string comparison fails.
+   * </p>
+   * @param expected the expected string value
+   * @param actual the actual string value
+   * @throws IOException if there is an issue parsing as JSON
+   */
+  public static void assertJsonEqual(String expected, String actual) throws IOException {
     ObjectMapper mapper = new ObjectMapper();
     Map m1 = mapper.readValue(expected, Map.class);
     Map m2 = mapper.readValue(actual, Map.class);
-    for(Object k : m1.keySet()) {
+    for (Object k : m1.keySet()) {
       Object v1 = m1.get(k);
       Object v2 = m2.get(k);
 
-      if(v2 == null) {
+      if (v2 == null) {
         Assert.fail("Unable to find key: " + k + " in output");
       }
-      if(k.equals("timestamp") || k.equals("guid")) {
+      if (k.equals("timestamp") || k.equals("guid")) {
         //TODO: Take the ?!?@ timestamps out of the reference file.
         Assert.assertEquals(v1.toString().length(), v2.toString().length());
-      }
-      else if(!v2.equals(v1)) {
-        Assert.assertEquals("value mismatch for " + k ,v1, v2);
+      } else if (!v2.equals(v1)) {
+        boolean goodDeepDown = false;
+        // if this fails, but is the original_string it may be in json format
+        // where the field/value order may be random
+        if (((String) k).equals("original_string")) {
+          try {
+            mapper.readValue((String) v1, Map.class);
+            assertJsonEqual((String) v1, (String) v2);
+            goodDeepDown = true;
+          } catch (Exception e) {
+            // nothing, the original fail stands
+          }
+        }
+        if (!goodDeepDown) {
+          Assert.assertEquals("value mismatch for " + k, v1, v2);
+        }
       }
     }
     Assert.assertEquals(m1.size(), m2.size());


[18/50] [abbrv] metron git commit: METRON-1462: Separate ES and Kibana from Metron Mpack (mmiklavc via mmiklavc) closes apache/metron#943

Posted by rm...@apache.org.
http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/dashboard/dashboardindex.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/dashboard/dashboardindex.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/dashboard/dashboardindex.py
new file mode 100755
index 0000000..f0903ac
--- /dev/null
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/dashboard/dashboardindex.py
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+#
+#  Licensed to the Apache Software Foundation (ASF) under one or more
+#  contributor license agreements.  See the NOTICE file distributed with
+#  this work for additional information regarding copyright ownership.
+#  The ASF licenses this file to You under the Apache License, Version 2.0
+#  (the "License"); you may not use this file except in compliance with
+#  the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+
+from elasticsearch import Elasticsearch
+from elasticsearch.helpers import bulk
+import cPickle as pickle
+import argparse, sys, os.path
+import errno
+import os
+
+
+class DashboardIndex(object):
+
+    def __init__(self, host='localhost', port=9200, url_prefix='', timeout=10, **kwargs):
+        """
+        :arg host: hostname of the node (default: localhost)
+        :arg port: port to use (integer, default: 9200)
+        :arg url_prefix: optional url prefix for elasticsearch
+        :arg timeout: default timeout in seconds (float, default: 10)
+        """
+        self.es = Elasticsearch([{'host':host,'port': port, 'url_prefix': url_prefix, 'timeout':timeout}])
+
+    def get(self):
+        """
+        Get .kibana index from Elasticsearch
+        """
+        dotkibana = self.es.search(index='.kibana', size = 100)
+        return dotkibana['hits']['hits']
+
+    def load(self,filespec):
+        """
+        Load Index data from the local filesystem
+        :args filespec: path/filename of the saved file
+        """
+        data=[]
+        with open(filespec,'rb') as fp:
+            data = pickle.load(fp)
+        return data
+
+    def save(self,filename,data):
+        """
+        Save Index data to the local filesystem
+        :args filename: path/filename for the saved file
+        :args data: the index data to be saved
+        """
+        with open(filename,'wb') as fp:
+            pickle.dump(data,fp)
+
+    def put(self,data):
+        """
+        Bulk write data to Elasticsearch
+        :args data: data to be written (note: index name is specified in data)
+        """
+        bulk(self.es,data)
+
+    def main(self,args):
+
+        if args.save:
+            print("running save with host:%s on port %d, filespec: %s" % (args.hostname, args.port, args.filespec))
+            self.save(filename=args.filespec,data=di.get())
+        else:
+            """
+            Loads Kibana Dashboard definition from disk and replaces .kibana on index
+            :args filespec: path/filename for saved file
+            """
+            if not os.path.isfile(args.filespec):
+                raise IOError(
+                    errno.ENOENT, os.strerror(errno.ENOENT), args.filespec)
+            self.es.indices.delete(index='.kibana', ignore=[400, 404])
+            self.put(data=di.load(filespec=args.filespec))
+
+if __name__ == '__main__':
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("hostname", help="ES Hostname or IP", type=str)
+    parser.add_argument("port", help="ES Port", type=int)
+    parser.add_argument("filespec", help="file to be pushed from or saved to", type=str)
+    parser.add_argument("-s","--save", help="run in SAVE mode - .kibana will be read and saved to filespec",action="store_true")
+    args = parser.parse_args()
+    di = DashboardIndex(host=args.hostname,port=args.port)
+    di.main(args)

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/dashboard/kibana.template
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/dashboard/kibana.template b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/dashboard/kibana.template
new file mode 100644
index 0000000..6f38ed5
--- /dev/null
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/dashboard/kibana.template
@@ -0,0 +1,233 @@
+{
+  "template" : ".kibana",
+    "mappings" : {
+      "search" : {
+        "dynamic" : "strict",
+        "properties" : {
+          "columns" : {
+            "type" : "keyword"
+          },
+          "description" : {
+            "type" : "text"
+          },
+          "hits" : {
+            "type" : "integer"
+          },
+          "kibanaSavedObjectMeta" : {
+            "properties" : {
+              "searchSourceJSON" : {
+                "type" : "text"
+              }
+            }
+          },
+          "sort" : {
+            "type" : "keyword"
+          },
+          "title" : {
+            "type" : "text"
+          },
+          "version" : {
+            "type" : "integer"
+          }
+        }
+      },
+      "url" : {
+        "dynamic" : "strict",
+        "properties" : {
+          "accessCount" : {
+            "type" : "long"
+          },
+          "accessDate" : {
+            "type" : "date"
+          },
+          "createDate" : {
+            "type" : "date"
+          },
+          "url" : {
+            "type" : "text",
+            "fields" : {
+              "keyword" : {
+                "type" : "keyword",
+                "ignore_above" : 2048
+              }
+            }
+          }
+        }
+      },
+      "dashboard" : {
+        "dynamic" : "strict",
+        "properties" : {
+          "description" : {
+            "type" : "text"
+          },
+          "hits" : {
+            "type" : "integer"
+          },
+          "kibanaSavedObjectMeta" : {
+            "properties" : {
+              "searchSourceJSON" : {
+                "type" : "text"
+              }
+            }
+          },
+          "optionsJSON" : {
+            "type" : "text"
+          },
+          "panelsJSON" : {
+            "type" : "text"
+          },
+          "refreshInterval" : {
+            "properties" : {
+              "display" : {
+                "type" : "keyword"
+              },
+              "pause" : {
+                "type" : "boolean"
+              },
+              "section" : {
+                "type" : "integer"
+              },
+              "value" : {
+                "type" : "integer"
+              }
+            }
+          },
+          "timeFrom" : {
+            "type" : "keyword"
+          },
+          "timeRestore" : {
+            "type" : "boolean"
+          },
+          "timeTo" : {
+            "type" : "keyword"
+          },
+          "title" : {
+            "type" : "text"
+          },
+          "uiStateJSON" : {
+            "type" : "text"
+          },
+          "version" : {
+            "type" : "integer"
+          }
+        }
+      },
+      "index-pattern" : {
+        "dynamic" : "strict",
+        "properties" : {
+          "fieldFormatMap" : {
+            "type" : "text"
+          },
+          "fields" : {
+            "type" : "text"
+          },
+          "intervalName" : {
+            "type" : "keyword"
+          },
+          "notExpandable" : {
+            "type" : "boolean"
+          },
+          "sourceFilters" : {
+            "type" : "text"
+          },
+          "timeFieldName" : {
+            "type" : "keyword"
+          },
+          "title" : {
+            "type" : "text"
+          }
+        }
+      },
+      "timelion-sheet" : {
+        "dynamic" : "strict",
+        "properties" : {
+          "description" : {
+            "type" : "text"
+          },
+          "hits" : {
+            "type" : "integer"
+          },
+          "kibanaSavedObjectMeta" : {
+            "properties" : {
+              "searchSourceJSON" : {
+                "type" : "text"
+              }
+            }
+          },
+          "timelion_chart_height" : {
+            "type" : "integer"
+          },
+          "timelion_columns" : {
+            "type" : "integer"
+          },
+          "timelion_interval" : {
+            "type" : "keyword"
+          },
+          "timelion_other_interval" : {
+            "type" : "keyword"
+          },
+          "timelion_rows" : {
+            "type" : "integer"
+          },
+          "timelion_sheet" : {
+            "type" : "text"
+          },
+          "title" : {
+            "type" : "text"
+          },
+          "version" : {
+            "type" : "integer"
+          }
+        }
+      },
+      "visualization" : {
+        "dynamic" : "strict",
+        "properties" : {
+          "description" : {
+            "type" : "text"
+          },
+          "kibanaSavedObjectMeta" : {
+            "properties" : {
+              "searchSourceJSON" : {
+                "type" : "text"
+              }
+            }
+          },
+          "savedSearchId" : {
+            "type" : "keyword"
+          },
+          "title" : {
+            "type" : "text"
+          },
+          "uiStateJSON" : {
+            "type" : "text"
+          },
+          "version" : {
+            "type" : "integer"
+          },
+          "visState" : {
+            "type" : "text"
+          }
+        }
+      },
+      "server" : {
+        "dynamic" : "strict",
+        "properties" : {
+          "uuid" : {
+            "type" : "keyword"
+          }
+        }
+      },
+      "_default_" : {
+        "dynamic" : "strict"
+      },
+      "config" : {
+        "dynamic" : "true",
+        "properties" : {
+          "buildNum" : {
+            "type" : "keyword"
+          }
+        }
+      }
+    }
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_master.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_master.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_master.py
index ce8c074..1cd6f4c 100755
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_master.py
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_master.py
@@ -14,8 +14,12 @@ See the License for the specific language governing permissions and
 limitations under the License.
 """
 
+import errno
 import os
 import requests
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
 from resource_management.core.exceptions import ComponentIsNotRunning
 from resource_management.core.logger import Logger
 from resource_management.core.resources.system import Execute
@@ -149,6 +153,34 @@ class Indexing(Script):
               cmd.format(params.es_http_url, template_name),
               logoutput=True)
 
+    @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+    def kibana_dashboard_install(self, env):
+      from params import params
+      env.set_params(params)
+
+      Logger.info("Connecting to Elasticsearch on: %s" % (params.es_http_url))
+
+      kibanaTemplate = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dashboard', 'kibana.template')
+      if not os.path.isfile(kibanaTemplate):
+        raise IOError(
+            errno.ENOENT, os.strerror(errno.ENOENT), kibanaTemplate)
+
+      Logger.info("Loading .kibana index template from %s" % kibanaTemplate)
+      template_cmd = ambari_format(
+          'curl -s -XPOST http://{es_http_url}/_template/.kibana -d @%s' % kibanaTemplate)
+      Execute(template_cmd, logoutput=True)
+
+      kibanaDashboardLoad = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dashboard', 'dashboard-bulkload.json')
+      if not os.path.isfile(kibanaDashboardLoad):
+        raise IOError(
+            errno.ENOENT, os.strerror(errno.ENOENT), kibanaDashboardLoad)
+
+      Logger.info("Loading .kibana dashboard from %s" % kibanaDashboardLoad)
+
+      kibana_cmd = ambari_format(
+          'curl -s -H "Content-Type: application/x-ndjson" -XPOST http://{es_http_url}/.kibana/_bulk --data-binary @%s' % kibanaDashboardLoad)
+      Execute(kibana_cmd, logoutput=True)
+
     def zeppelin_notebook_import(self, env):
         from params import params
         env.set_params(params)

http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/pom.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/pom.xml b/metron-deployment/pom.xml
index fe76ade..f59324c 100644
--- a/metron-deployment/pom.xml
+++ b/metron-deployment/pom.xml
@@ -49,6 +49,7 @@
                 <activeByDefault>true</activeByDefault>
             </activation>
             <modules>
+                <module>packaging/ambari/elasticsearch-mpack</module>
                 <module>packaging/ambari/metron-mpack</module>
             </modules>
         </profile>


[33/50] [abbrv] metron git commit: METRON-1521: JSONMapParser is no longer serializable closes apache/incubator-metron#991

Posted by rm...@apache.org.
METRON-1521: JSONMapParser is no longer serializable closes apache/incubator-metron#991


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/eb5b2d42
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/eb5b2d42
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/eb5b2d42

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: eb5b2d42b83901c05c15e00b9c3c5bce5dd72ab9
Parents: ea6992f
Author: cstella <ce...@gmail.com>
Authored: Thu Apr 12 09:33:12 2018 -0400
Committer: cstella <ce...@gmail.com>
Committed: Thu Apr 12 09:33:12 2018 -0400

----------------------------------------------------------------------
 .../java/org/apache/metron/parsers/json/JSONMapParser.java     | 4 ++--
 .../org/apache/metron/parsers/integration/ParserDriver.java    | 6 +++++-
 2 files changed, 7 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/eb5b2d42/metron-platform/metron-parsers/src/main/java/org/apache/metron/parsers/json/JSONMapParser.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-parsers/src/main/java/org/apache/metron/parsers/json/JSONMapParser.java b/metron-platform/metron-parsers/src/main/java/org/apache/metron/parsers/json/JSONMapParser.java
index bddf35d..f5d67f9 100644
--- a/metron-platform/metron-parsers/src/main/java/org/apache/metron/parsers/json/JSONMapParser.java
+++ b/metron-platform/metron-parsers/src/main/java/org/apache/metron/parsers/json/JSONMapParser.java
@@ -89,8 +89,7 @@ public class JSONMapParser extends BasicParser {
   public static final String JSONP_QUERY = "jsonpQuery";
 
   private MapStrategy mapStrategy = MapStrategy.DROP;
-  private TypeRef<List<Map<String, Object>>> typeRef = new TypeRef<List<Map<String, Object>>>() {
-  };
+  private transient TypeRef<List<Map<String, Object>>> typeRef = null;
   private String jsonpQuery = null;
 
 
@@ -99,6 +98,7 @@ public class JSONMapParser extends BasicParser {
     String strategyStr = (String) config.getOrDefault(MAP_STRATEGY_CONFIG, MapStrategy.DROP.name());
     mapStrategy = MapStrategy.valueOf(strategyStr);
     if (config.containsKey(JSONP_QUERY)) {
+      typeRef = new TypeRef<List<Map<String, Object>>>() { };
       jsonpQuery = (String) config.get(JSONP_QUERY);
       Configuration.setDefaults(new Configuration.Defaults() {
 

http://git-wip-us.apache.org/repos/asf/metron/blob/eb5b2d42/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/integration/ParserDriver.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/integration/ParserDriver.java b/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/integration/ParserDriver.java
index deb0217..b03ea80 100644
--- a/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/integration/ParserDriver.java
+++ b/metron-platform/metron-parsers/src/test/java/org/apache/metron/parsers/integration/ParserDriver.java
@@ -18,6 +18,7 @@
 package org.apache.metron.parsers.integration;
 
 import com.google.common.collect.ImmutableList;
+import org.apache.commons.lang.SerializationUtils;
 import org.apache.metron.common.configuration.ConfigurationsUtils;
 import org.apache.metron.common.configuration.FieldValidator;
 import org.apache.metron.common.configuration.ParserConfigurations;
@@ -42,6 +43,7 @@ import org.mockito.Matchers;
 
 import java.io.Closeable;
 import java.io.IOException;
+import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -54,7 +56,7 @@ import static org.mockito.Mockito.when;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class ParserDriver {
+public class ParserDriver implements Serializable {
   private static final Logger LOG = LoggerFactory.getLogger(ParserBolt.class);
   public static class CollectingWriter implements MessageWriter<JSONObject>{
     List<byte[]> output;
@@ -151,6 +153,8 @@ public class ParserDriver {
 
   public ProcessorResult<List<byte[]>> run(List<byte[]> in) {
     ShimParserBolt bolt = new ShimParserBolt(new ArrayList<>());
+    byte[] b = SerializationUtils.serialize(bolt);
+    ShimParserBolt b2 = (ShimParserBolt) SerializationUtils.deserialize(b);
     OutputCollector collector = mock(OutputCollector.class);
     bolt.prepare(null, null, collector);
     for(byte[] record : in) {
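
Taken together, the two hunks above make the non-serializable TypeRef field
transient (re-created lazily in configure) and add a serialize/deserialize
round trip to ParserDriver as a regression check. A minimal sketch of the same
pattern, using a hypothetical stand-in class rather than the real TypeRef:

    import java.io.Serializable;
    import org.apache.commons.lang.SerializationUtils;

    public class TransientFieldDemo implements Serializable {

      // Stand-in for a non-serializable dependency such as TypeRef.
      static class NonSerializableRef { }

      // 'transient' keeps Java serialization from touching the field;
      // it is re-created on demand after deserialization.
      private transient NonSerializableRef ref = null;

      public NonSerializableRef getRef() {
        if (ref == null) {
          ref = new NonSerializableRef();
        }
        return ref;
      }

      public static void main(String[] args) {
        TransientFieldDemo original = new TransientFieldDemo();
        original.getRef();

        // Mirrors the round-trip check added to ParserDriver.run().
        byte[] bytes = SerializationUtils.serialize(original);
        TransientFieldDemo copy = (TransientFieldDemo) SerializationUtils.deserialize(bytes);

        // The transient field survives as null and is lazily re-created.
        System.out.println(copy.getRef() != null);  // true
      }
    }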


[36/50] [abbrv] metron git commit: METRON-1519 Indexing Error Topic Property Not Displayed in MPack (nickwallen) closes apache/metron#987

Posted by rm...@apache.org.
METRON-1519 Indexing Error Topic Property Not Displayed in MPack (nickwallen) closes apache/metron#987


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/f8b7c585
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/f8b7c585
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/f8b7c585

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: f8b7c5852310a088bd15ca5d21ba4f98b51521be
Parents: bfe90ef
Author: nickwallen <ni...@nickallen.org>
Authored: Fri Apr 13 14:47:17 2018 -0400
Committer: nickallen <ni...@apache.org>
Committed: Fri Apr 13 14:47:17 2018 -0400

----------------------------------------------------------------------
 .../common-services/METRON/CURRENT/themes/metron_theme.json    | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/f8b7c585/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/themes/metron_theme.json
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/themes/metron_theme.json b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/themes/metron_theme.json
index 234b551..364b3ef 100644
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/themes/metron_theme.json
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/themes/metron_theme.json
@@ -514,6 +514,10 @@
           "subsection-name": "subsection-indexing-kafka"
         },
         {
+          "config": "metron-indexing-env/indexing_error_topic",
+          "subsection-name": "subsection-indexing-kafka"
+        },
+        {
           "config": "metron-indexing-env/update_hbase_table",
           "subsection-name": "subsection-indexing-update"
         },
@@ -553,7 +557,6 @@
           "config": "metron-indexing-env/batch_indexing_topology_max_spout_pending",
           "subsection-name": "subsection-indexing-hdfs"
         },
-
         {
           "config": "metron-indexing-env/ra_indexing_kafka_spout_parallelism",
           "subsection-name": "subsection-indexing-storm"
@@ -562,7 +565,6 @@
           "config": "metron-indexing-env/batch_indexing_kafka_spout_parallelism",
           "subsection-name": "subsection-indexing-hdfs"
         },
-
         {
           "config": "metron-indexing-env/ra_indexing_writer_parallelism",
           "subsection-name": "subsection-indexing-storm"


[12/50] [abbrv] metron git commit: METRON-590 Enable Use of Event Time in Profiler (nickwallen) closes apache/metron#965

Posted by rm...@apache.org.
http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/StandAloneProfilerTest.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/StandAloneProfilerTest.java b/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/StandAloneProfilerTest.java
new file mode 100644
index 0000000..2269c86
--- /dev/null
+++ b/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/StandAloneProfilerTest.java
@@ -0,0 +1,255 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+package org.apache.metron.profiler;
+
+import org.adrianwalker.multilinestring.Multiline;
+import org.apache.metron.common.configuration.profiler.ProfilerConfig;
+import org.apache.metron.common.utils.JSONUtils;
+import org.apache.metron.stellar.dsl.Context;
+import org.json.simple.JSONObject;
+import org.json.simple.parser.JSONParser;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.InputStream;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Tests the StandAloneProfiler class.
+ */
+public class StandAloneProfilerTest {
+
+  /**
+   * {
+   *   "profiles": [
+   *   ]
+   * }
+   */
+  @Multiline
+  private String noProfiles;
+
+  /**
+   * {
+   *   "profiles": [
+   *      {
+   *        "profile": "profile1",
+   *        "foreach": "'global'",
+   *        "init": { "count": 0 },
+   *        "update": { "count": "count + 1" },
+   *        "result": "count"
+   *      }
+   *   ]
+   * }
+   */
+  @Multiline
+  private String oneProfile;
+
+  /**
+   * {
+   *   "profiles": [
+   *      {
+   *        "profile": "profile1",
+   *        "foreach": "'global1'",
+   *        "result": "'result'"
+   *      },
+   *      {
+   *        "profile": "profile2",
+   *        "foreach": "'global2'",
+   *        "result": "'result'"
+   *      }
+   *   ]
+   * }
+   */
+  @Multiline
+  private String twoProfiles;
+
+  /**
+   * {
+   *   "ip_src_addr": "10.0.0.1",
+   *   "ip_dst_addr": "10.0.0.20",
+   *   "protocol": "HTTP",
+   *   "timestamp": 2222222222222,
+   * }
+   */
+  @Multiline
+  private String messageJson;
+
+  private JSONObject message;
+
+  private long periodDurationMillis = TimeUnit.MINUTES.toMillis(15);
+
+  private Context context = Context.EMPTY_CONTEXT();
+
+  @Before
+  public void setup() throws Exception {
+
+    // parse the input message
+    JSONParser parser = new JSONParser();
+    message = (JSONObject) parser.parse(messageJson);
+  }
+
+  @Test
+  public void testWithOneProfile() throws Exception {
+
+    StandAloneProfiler profiler = createProfiler(oneProfile);
+    profiler.apply(message);
+    profiler.apply(message);
+    profiler.apply(message);
+
+    List<ProfileMeasurement> measurements = profiler.flush();
+    assertEquals(1, measurements.size());
+
+    // expect 1 measurement for the 1 profile that has been defined
+    ProfileMeasurement m = measurements.get(0);
+    assertEquals("profile1", m.getProfileName());
+    assertEquals(3, m.getProfileValue());
+  }
+
+
+  @Test
+  public void testWithTwoProfiles() throws Exception {
+
+    StandAloneProfiler profiler = createProfiler(twoProfiles);
+    profiler.apply(message);
+    profiler.apply(message);
+    profiler.apply(message);
+
+    List<ProfileMeasurement> measurements = profiler.flush();
+    assertEquals(2, measurements.size());
+
+    // expect 2 measurements, 1 for each profile
+    List<String> expected = Arrays.asList(new String[] { "profile1", "profile2" });
+    {
+      ProfileMeasurement m = measurements.get(0);
+      assertTrue(expected.contains(m.getProfileName()));
+      assertEquals("result", m.getProfileValue());
+    }
+    {
+      ProfileMeasurement m = measurements.get(1);
+      assertTrue(expected.contains(m.getProfileName()));
+      assertEquals("result", m.getProfileValue());
+    }
+  }
+
+  /**
+   * The message count and route count will always be equal if there is only one
+   * profile defined.  The counts can differ when there are multiple profiles
+   * defined that each use the same message.
+   */
+  @Test
+  public void testRouteAndMessageCounters() throws Exception {
+    {
+      StandAloneProfiler profiler = createProfiler(noProfiles);
+
+      profiler.apply(message);
+      assertEquals(1, profiler.getMessageCount());
+      assertEquals(0, profiler.getRouteCount());
+
+      profiler.apply(message);
+      assertEquals(2, profiler.getMessageCount());
+      assertEquals(0, profiler.getRouteCount());
+
+      profiler.apply(message);
+      assertEquals(3, profiler.getMessageCount());
+      assertEquals(0, profiler.getRouteCount());
+    }
+    {
+      StandAloneProfiler profiler = createProfiler(oneProfile);
+
+      profiler.apply(message);
+      assertEquals(1, profiler.getMessageCount());
+      assertEquals(1, profiler.getRouteCount());
+
+      profiler.apply(message);
+      assertEquals(2, profiler.getMessageCount());
+      assertEquals(2, profiler.getRouteCount());
+
+      profiler.apply(message);
+      assertEquals(3, profiler.getMessageCount());
+      assertEquals(3, profiler.getRouteCount());
+    }
+    {
+      StandAloneProfiler profiler = createProfiler(twoProfiles);
+
+      profiler.apply(message);
+      assertEquals(1, profiler.getMessageCount());
+      assertEquals(2, profiler.getRouteCount());
+
+      profiler.apply(message);
+      assertEquals(2, profiler.getMessageCount());
+      assertEquals(4, profiler.getRouteCount());
+
+      profiler.apply(message);
+      assertEquals(3, profiler.getMessageCount());
+      assertEquals(6, profiler.getRouteCount());
+    }
+  }
+
+  @Test
+  public void testProfileCount() throws Exception {
+    {
+      StandAloneProfiler profiler = createProfiler(noProfiles);
+      assertEquals(0, profiler.getProfileCount());
+    }
+    {
+      StandAloneProfiler profiler = createProfiler(oneProfile);
+      assertEquals(1, profiler.getProfileCount());
+    }
+    {
+      StandAloneProfiler profiler = createProfiler(twoProfiles);
+      assertEquals(2, profiler.getProfileCount());
+    }
+  }
+
+  /**
+   * Creates a ProfilerConfig based on a string containing JSON.
+   *
+   * @param configAsJSON The config as JSON.
+   * @return The ProfilerConfig.
+   * @throws Exception
+   */
+  private ProfilerConfig toProfilerConfig(String configAsJSON) throws Exception {
+
+    InputStream in = new ByteArrayInputStream(configAsJSON.getBytes("UTF-8"));
+    return JSONUtils.INSTANCE.load(in, ProfilerConfig.class);
+  }
+
+  /**
+   * Creates the StandAloneProfiler
+   *
+   * @param profileJson The Profiler configuration to use as a String containing JSON.
+   * @throws Exception
+   */
+  private StandAloneProfiler createProfiler(String profileJson) throws Exception {
+
+    // the TTL and max routes need not be bounded
+    long profileTimeToLiveMillis = Long.MAX_VALUE;
+    long maxNumberOfRoutes = Long.MAX_VALUE;
+
+    ProfilerConfig config = toProfilerConfig(profileJson);
+    return new StandAloneProfiler(config, periodDurationMillis, profileTimeToLiveMillis, maxNumberOfRoutes, context);
+  }
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/clock/DefaultClockFactoryTest.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/clock/DefaultClockFactoryTest.java b/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/clock/DefaultClockFactoryTest.java
new file mode 100644
index 0000000..c99b401
--- /dev/null
+++ b/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/clock/DefaultClockFactoryTest.java
@@ -0,0 +1,75 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+package org.apache.metron.profiler.clock;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.metron.common.configuration.profiler.ProfilerConfig;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Optional;
+
+/**
+ * Tests the DefaultClockFactory.
+ */
+public class DefaultClockFactoryTest {
+
+  /**
+   * The object under test.
+   */
+  private DefaultClockFactory clockFactory;
+
+  @Before
+  public void setup() {
+    clockFactory = new DefaultClockFactory();
+  }
+
+  /**
+   * When a 'timestampField' is defined the factory should return a clock
+   * that deals with event time.
+   */
+  @Test
+  public void testCreateEventTimeClock() {
+
+    // configure the profiler to use event time
+    ProfilerConfig config = new ProfilerConfig();
+    config.setTimestampField(Optional.of("timestamp"));
+
+    // the factory should return a clock that handles 'event time'
+    Clock clock = clockFactory.createClock(config);
+    assertTrue(clock instanceof EventTimeClock);
+  }
+
+  /**
+   * When no 'timestampField' is defined the factory should return a clock
+   * that deals with processing time.
+   */
+  @Test
+  public void testCreateProcessingTimeClock() {
+
+    // the profiler uses processing time by default
+    ProfilerConfig config = new ProfilerConfig();
+
+    // the factory should return a clock that handles 'processing time'
+    Clock clock = clockFactory.createClock(config);
+    assertTrue(clock instanceof WallClock);
+  }
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/clock/EventTimeClockTest.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/clock/EventTimeClockTest.java b/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/clock/EventTimeClockTest.java
new file mode 100644
index 0000000..0397250
--- /dev/null
+++ b/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/clock/EventTimeClockTest.java
@@ -0,0 +1,115 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+package org.apache.metron.profiler.clock;
+
+import org.json.simple.JSONObject;
+import org.junit.Test;
+
+import java.util.Optional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class EventTimeClockTest {
+
+  private final String timestampField = "timestamp";
+
+  public JSONObject createMessage() {
+    return new JSONObject();
+  }
+
+  /**
+   * The event time should be extracted from a field contained within a message.
+   */
+  @Test
+  public void testEventTime() {
+
+    JSONObject message = createMessage();
+
+    // add a field containing a timestamp to the message
+    final Long timestamp = System.currentTimeMillis();
+    message.put(timestampField, timestamp);
+
+    // what time is it?
+    EventTimeClock clock = new EventTimeClock(timestampField);
+    Optional<Long> result = clock.currentTimeMillis(message);
+
+    // validate
+    assertTrue(result.isPresent());
+    assertEquals(timestamp, result.get());
+  }
+
+  /**
+   * If the timestamp field is a String, it should be converted to a Long and used as-is.
+   */
+  @Test
+  public void testEventTimeWithString() {
+    JSONObject message = createMessage();
+
+    // the timestamp field is a string
+    final Long timestamp = System.currentTimeMillis();
+    message.put(timestampField, timestamp.toString());
+
+    // what time is it?
+    EventTimeClock clock = new EventTimeClock(timestampField);
+    Optional<Long> result = clock.currentTimeMillis(message);
+
+    // validate
+    assertTrue(result.isPresent());
+    assertEquals(timestamp, result.get());
+  }
+
+  /**
+   * If the message does not contain the timestamp field, then nothing should be returned.
+   */
+  @Test
+  public void testMissingTimestampField() {
+
+    // no timestamp added to the message
+    JSONObject message = createMessage();
+
+    // what time is it?
+    EventTimeClock clock = new EventTimeClock(timestampField);
+    Optional<Long> result = clock.currentTimeMillis(message);
+
+    // validate
+    assertFalse(result.isPresent());
+  }
+
+  /**
+   * No timestamp should be returned if the value stored in the timestamp field
+   * cannot be coerced into a valid timestamp.
+   */
+  @Test
+  public void testInvalidValue() {
+
+    // create a message with an invalid value stored in the timestamp field
+    JSONObject message = createMessage();
+    message.put(timestampField, "invalid-timestamp-value");
+
+    // what time is it?
+    EventTimeClock clock = new EventTimeClock(timestampField);
+    Optional<Long> result = clock.currentTimeMillis(message);
+
+    // no value should be returned
+    assertFalse(result.isPresent());
+  }
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/clock/WallClockTest.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/clock/WallClockTest.java b/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/clock/WallClockTest.java
new file mode 100644
index 0000000..76b2d7b
--- /dev/null
+++ b/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/clock/WallClockTest.java
@@ -0,0 +1,54 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+package org.apache.metron.profiler.clock;
+
+import org.json.simple.JSONObject;
+import org.junit.Test;
+
+import java.util.Optional;
+
+import static org.junit.Assert.assertTrue;
+
+public class WallClockTest {
+
+  public JSONObject createMessage() {
+    return new JSONObject();
+  }
+
+  /**
+   * The wall clock time ALWAYS comes from the system clock.
+   */
+  @Test
+  public void testCurrentTimeMillis() {
+
+    JSONObject message = createMessage();
+    long before = System.currentTimeMillis();
+
+    // what time is it?
+    WallClock clock = new WallClock();
+    Optional<Long> result = clock.currentTimeMillis(message);
+
+    // validate
+    long after = System.currentTimeMillis();
+    assertTrue(result.isPresent());
+    assertTrue(result.get() >= before);
+    assertTrue(result.get() <= after);
+  }
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/README.md
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/README.md b/metron-analytics/metron-profiler/README.md
index dc5ec07..218ec66 100644
--- a/metron-analytics/metron-profiler/README.md
+++ b/metron-analytics/metron-profiler/README.md
@@ -328,6 +328,62 @@ Continuing the previous running example, at this point, you have seen how your p
 
 ## Anatomy of a Profile
 
+### Profiler
+
+The Profiler configuration contains only two fields, only one of which is required.
+
+```
+{
+    "profiles": [
+        { "profile": "one", ... },
+        { "profile": "two", ... }
+    ],
+    "timestampField": "timestamp"
+}
+```
+
+| Name                              |               | Description
+|---                                |---            |---
+| [profiles](#profiles)             | Required      | A list of zero or more Profile definitions.
+| [timestampField](#timestampfield) | Optional      | Indicates whether processing time or event time should be used. By default, processing time is enabled.
+
+
+#### `profiles`
+
+*Required*
+
+A list of zero or more Profile definitions.
+
+#### `timestampField`
+
+*Optional*
+
+Indicates whether processing time or event time is used. By default, processing time is enabled.
+
+##### Processing Time
+
+By default, no `timestampField` is defined.  In this case, the Profiler uses system time when generating profiles.  This means that the profiles are generated based on when the data has been processed by the Profiler.  This is also known as 'processing time'.
+
+This is the simplest mode of operation, but it has some drawbacks.  If the Profiler is consuming live data and all is well, the processing and event times will likely remain similar and consistent. If processing time diverges from event time, the Profiler will generate skewed profiles.
+
+There are a few scenarios that might cause skewed profiles when using processing time.  For example, when a system is restarted after a scheduled maintenance window, a high volume of messages will need to be processed by the Profiler. The output of the Profiler might indicate an increase in activity during this time, although no change in activity actually occurred on the target network. The same situation could occur if an upstream system that provides telemetry undergoes an outage.
+
+[Event Time](#event-time) can be used to mitigate these problems.
+
+##### Event Time
+
+Alternatively, a `timestampField` can be defined.  This must be the name of a field contained within the telemetry processed by the Profiler.  The Profiler will extract and use the timestamp contained within this field.
+
+* If a message does not contain this field, it will be dropped.
+
+* The field must contain a timestamp in epoch milliseconds, expressed as either a number or a string. Otherwise, the message will be dropped.
+
+* The Profiler will use the same field across all telemetry sources and for all profiles.
+
+* Be aware of clock skew across telemetry sources.  If your profile is processing telemetry from multiple sources where the clock differs significantly, the Profiler may assume that some of those messages are late and will be ignored.  Adjusting the [`profiler.window.duration`](#profilerwindowduration) and [`profiler.window.lag`](#profilerwindowlag) can help accommodate skewed clocks. 
+
+### Profiles
+
 A profile definition requires a JSON-formatted set of elements, many of which can contain Stellar code.  The specification contains the following elements.  (For the impatient, skip ahead to the [Examples](#examples).)
 
 | Name                          |               | Description
@@ -466,15 +522,19 @@ The values can be changed on disk and then the Profiler topology must be restart
 
 | Setting                                                                       | Description
 |---                                                                            |---
-| [`profiler.input.topic`](#profilerinputtopic)                                 | The name of the Kafka topic from which to consume data.
-| [`profiler.output.topic`](#profileroutputtopic)                               | The name of the Kafka topic to which profile data is written.  Only used with profiles that define the [`triage` result field](#result).
+| [`profiler.input.topic`](#profilerinputtopic)                                 | The name of the input Kafka topic.
+| [`profiler.output.topic`](#profileroutputtopic)                               | The name of the output Kafka topic. 
 | [`profiler.period.duration`](#profilerperiodduration)                         | The duration of each profile period.  
-| [`profiler.period.duration.units`](#profilerperioddurationunits)              | The units used to specify the [`profiler.period.duration`](#profilerperiodduration).  
+| [`profiler.period.duration.units`](#profilerperioddurationunits)              | The units used to specify the [`profiler.period.duration`](#profilerperiodduration).
+| [`profiler.window.duration`](#profilerwindowduration)                         | The duration of each profile window.
+| [`profiler.window.duration.units`](#profilerwindowdurationunits)              | The units used to specify the [`profiler.window.duration`](#profilerwindowduration).
+| [`profiler.window.lag`](#profilerwindowlag)                                   | The maximum time lag for timestamps.
+| [`profiler.window.lag.units`](#profilerwindowlagunits)                        | The units used to specify the [`profiler.window.lag`](#profilerwindowlag).
 | [`profiler.workers`](#profilerworkers)                                        | The number of worker processes for the topology.
 | [`profiler.executors`](#profilerexecutors)                                    | The number of executors to spawn per component.
 | [`profiler.ttl`](#profilerttl)                                                | If a message has not been applied to a Profile in this period of time, the Profile will be forgotten and its resources will be cleaned up.
 | [`profiler.ttl.units`](#profilerttlunits)                                     | The units used to specify the `profiler.ttl`.
-| [`profiler.hbase.salt.divisor`](#profilerhbasesaltdivisor)                    | A salt is prepended to the row key to help prevent hotspotting.
+| [`profiler.hbase.salt.divisor`](#profilerhbasesaltdivisor)                    | A salt is prepended to the row key to help prevent hot-spotting.
 | [`profiler.hbase.table`](#profilerhbasetable)                                 | The name of the HBase table that profiles are written to.
 | [`profiler.hbase.column.family`](#profilerhbasecolumnfamily)                  | The column family used to store profiles.
 | [`profiler.hbase.batch`](#profilerhbasebatch)                                 | The number of puts that are written to HBase in a single batch.
@@ -508,6 +568,36 @@ The units used to specify the `profiler.period.duration`.  This value should be
 
 *Important*: To read a profile using the Profiler Client, the Profiler Client's `profiler.client.period.duration.units` property must match this value.  Otherwise, the [Profiler Client](metron-analytics/metron-profiler-client) will be unable to read the profile data.
 
+### `profiler.window.duration`
+
+*Default*: 30
+
+The duration of each profile window.  Telemetry that arrives within a slice of time is processed within a single window.  
+
+Many windows of telemetry will be processed during a single profile period.  This does not change the output of the Profiler; it only changes how the Profiler processes data. The window defines how much data the Profiler processes in a single pass.
+
+This value should be defined along with [`profiler.window.duration.units`](#profilerwindowdurationunits).
+
+This value must be less than the period duration as defined by [`profiler.period.duration`](#profilerperiodduration) and [`profiler.period.duration.units`](#profilerperioddurationunits).
+
+### `profiler.window.duration.units`
+
+*Default*: SECONDS
+
+The units used to specify the `profiler.window.duration`.  This value should be defined along with [`profiler.window.duration`](#profilerwindowduration).
+
+### `profiler.window.lag`
+
+*Default*: 1
+
+The maximum time lag for timestamps. Timestamps cannot arrive out-of-order by more than this amount. This value should be defined along with [`profiler.window.lag.units`](#profilerwindowlagunits).
+
+### `profiler.window.lag.units`
+
+*Default*: SECONDS
+
+The units used to specify the `profiler.window.lag`.  This value should be defined along with [`profiler.window.lag`](#profilerwindowlag).
+
 ### `profiler.workers`
 
 *Default*: 1
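
As a rough worked example of how these settings interact, based on the
defaults documented above: with a 15-minute period and a 30-second window,
the Profiler makes 30 processing passes per profile period.

    import java.util.concurrent.TimeUnit;

    public class WindowMath {
      public static void main(String[] args) {
        long periodMillis = TimeUnit.MINUTES.toMillis(15); // profiler.period.duration default
        long windowMillis = TimeUnit.SECONDS.toMillis(30); // profiler.window.duration default

        // Windows (processing passes) per profile period: 900s / 30s = 30.
        System.out.println(periodMillis / windowMillis);
      }
    }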

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/main/config/profiler.properties
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/config/profiler.properties b/metron-analytics/metron-profiler/src/main/config/profiler.properties
index 896f8d5..fe3c475 100644
--- a/metron-analytics/metron-profiler/src/main/config/profiler.properties
+++ b/metron-analytics/metron-profiler/src/main/config/profiler.properties
@@ -22,6 +22,10 @@
 
 topology.worker.childopts=
 topology.auto-credentials=
+profiler.workers=1
+profiler.executors=0
+topology.message.timeout.secs=30
+topology.max.spout.pending=100000
 
 ##### Profiler #####
 
@@ -29,10 +33,16 @@ profiler.input.topic=indexing
 profiler.output.topic=enrichments
 profiler.period.duration=15
 profiler.period.duration.units=MINUTES
-profiler.workers=1
-profiler.executors=0
+profiler.window.duration=30
+profiler.window.duration.units=SECONDS
 profiler.ttl=30
 profiler.ttl.units=MINUTES
+profiler.window.lag=1
+profiler.window.lag.units=MINUTES
+profiler.max.routes.per.bolt=10000
+
+##### HBase #####
+
 profiler.hbase.salt.divisor=1000
 profiler.hbase.table=profiler
 profiler.hbase.column.family=P

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/main/flux/profiler/remote.yaml
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/flux/profiler/remote.yaml b/metron-analytics/metron-profiler/src/main/flux/profiler/remote.yaml
index 9ec5ba4..83c9fde 100644
--- a/metron-analytics/metron-profiler/src/main/flux/profiler/remote.yaml
+++ b/metron-analytics/metron-profiler/src/main/flux/profiler/remote.yaml
@@ -17,10 +17,12 @@
 name: "profiler"
 
 config:
-    topology.worker.childopts: ${topology.worker.childopts}
     topology.workers: ${profiler.workers}
     topology.acker.executors: ${profiler.executors}
+    topology.worker.childopts: ${topology.worker.childopts}
     topology.auto-credentials: ${topology.auto-credentials}
+    topology.message.timeout.secs: ${topology.message.timeout.secs}
+    topology.max.spout.pending: ${topology.max.spout.pending}
 
 components:
 
@@ -107,11 +109,23 @@ components:
             -   name: "withProducerConfigs"
                 args: [ref: "kafkaWriterProps"]
 
-    -   id: "kafkaDestinationHandler"
-        className: "org.apache.metron.profiler.bolt.KafkaDestinationHandler"
+    -   id: "kafkaEmitter"
+        className: "org.apache.metron.profiler.bolt.KafkaEmitter"
+
+    -   id: "hbaseEmitter"
+        className: "org.apache.metron.profiler.bolt.HBaseEmitter"
+
+    -   id: "windowDuration"
+        className: "org.apache.storm.topology.base.BaseWindowedBolt$Duration"
+        constructorArgs:
+            - ${profiler.window.duration}
+            - "${profiler.window.duration.units}"
 
-    -   id: "hbaseDestinationHandler"
-        className: "org.apache.metron.profiler.bolt.HBaseDestinationHandler"
+    -   id: "windowLag"
+        className: "org.apache.storm.topology.base.BaseWindowedBolt$Duration"
+        constructorArgs:
+            - ${profiler.window.lag}
+            - "${profiler.window.lag.units}"
 
 spouts:
 
@@ -129,17 +143,23 @@ bolts:
 
     -   id: "builderBolt"
         className: "org.apache.metron.profiler.bolt.ProfileBuilderBolt"
-        constructorArgs:
-            - "${kafka.zk}"
         configMethods:
+            - name: "withZookeeperUrl"
+              args: ["${kafka.zk}"]
             - name: "withPeriodDuration"
               args: [${profiler.period.duration}, "${profiler.period.duration.units}"]
             - name: "withProfileTimeToLive"
               args: [${profiler.ttl}, "${profiler.ttl.units}"]
-            - name: "withDestinationHandler"
-              args: [ref: "kafkaDestinationHandler"]
-            - name: "withDestinationHandler"
-              args: [ref: "hbaseDestinationHandler"]
+            - name: "withEmitter"
+              args: [ref: "kafkaEmitter"]
+            - name: "withEmitter"
+              args: [ref: "hbaseEmitter"]
+            - name: "withTumblingWindow"
+              args: [ref: "windowDuration"]
+            - name: "withLag"
+              args: [ref: "windowLag"]
+            - name: "withMaxNumberOfRoutes"
+              args: [${profiler.max.routes.per.bolt}]
 
     -   id: "hbaseBolt"
         className: "org.apache.metron.hbase.bolt.HBaseBolt"

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/DestinationHandler.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/DestinationHandler.java b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/DestinationHandler.java
deleted file mode 100644
index 2257784..0000000
--- a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/DestinationHandler.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-package org.apache.metron.profiler.bolt;
-
-import org.apache.metron.profiler.ProfileMeasurement;
-import org.apache.storm.task.OutputCollector;
-import org.apache.storm.topology.OutputFieldsDeclarer;
-
-/**
- * This class handles the mechanics of emitting a profile measurement to a
- * stream responsible for writing to a specific destination.
- *
- * The measurements produced by a profile can be written to one or more
- * destinations; HBase, Kafka, etc.  Each of the destinations leverage a
- * separate stream within the topology definition.
- */
-public interface DestinationHandler {
-
-  /**
-   * Each destination leverages a unique stream.  This method defines
-   * the unique stream identifier.
-   *
-   * The stream identifier must also be declared within the topology
-   * definition.
-   */
-  String getStreamId();
-
-  /**
-   * Declares the output fields for the stream.
-   * @param declarer
-   */
-  void declareOutputFields(OutputFieldsDeclarer declarer);
-
-  /**
-   * Emit the measurement.
-   * @param measurement The measurement to emit.
-   * @param collector The output collector.
-   */
-  void emit(ProfileMeasurement measurement, OutputCollector collector);
-}
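
For reference, a minimal sketch (a hypothetical LoggingDestinationHandler, not
from the patch) of the stream-per-destination pattern this removed interface
captured; the KafkaEmitter and HBaseEmitter classes wired up in remote.yaml
above take over the same role.

    import org.apache.metron.profiler.ProfileMeasurement;
    import org.apache.storm.task.OutputCollector;
    import org.apache.storm.topology.OutputFieldsDeclarer;
    import org.apache.storm.tuple.Fields;
    import org.apache.storm.tuple.Values;

    public class LoggingDestinationHandler implements DestinationHandler {

      @Override
      public String getStreamId() {
        // Each destination owns exactly one stream in the topology.
        return "logging";
      }

      @Override
      public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declareStream(getStreamId(), new Fields("measurement"));
      }

      @Override
      public void emit(ProfileMeasurement measurement, OutputCollector collector) {
        collector.emit(getStreamId(), new Values(measurement));
      }
    }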

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/FixedFrequencyFlushSignal.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/FixedFrequencyFlushSignal.java b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/FixedFrequencyFlushSignal.java
new file mode 100644
index 0000000..b9f57dd
--- /dev/null
+++ b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/FixedFrequencyFlushSignal.java
@@ -0,0 +1,126 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.metron.profiler.bolt;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.invoke.MethodHandles;
+
+/**
+ * Signals a flush on a fixed frequency, i.e. every X milliseconds.
+ */
+public class FixedFrequencyFlushSignal implements FlushSignal {
+
+  protected static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  /**
+   * The latest known timestamp.
+   */
+  private long currentTime;
+
+  /**
+   * The time when the next flush should occur.
+   */
+  private long flushTime;
+
+  /**
+   * The amount of time between flushes in milliseconds.
+   */
+  private long flushFrequency;
+
+  public FixedFrequencyFlushSignal(long flushFrequencyMillis) {
+
+    if(flushFrequencyMillis < 0) {
+      throw new IllegalArgumentException("flush frequency must be >= 0");
+    }
+
+    this.flushFrequency = flushFrequencyMillis;
+    reset();
+  }
+
+  /**
+   * Resets the state used to keep track of time.
+   */
+  @Override
+  public void reset() {
+    flushTime = 0;
+    currentTime = 0;
+
+    LOG.debug("Flush counters reset");
+  }
+
+  /**
+   * Update the internal state which tracks time.
+   *
+   * @param timestamp The timestamp received within a tuple.
+   */
+  @Override
+  public void update(long timestamp) {
+
+    if(timestamp > currentTime) {
+
+      // need to update current time
+      LOG.debug("Updating current time; last={}, new={}", currentTime, timestamp);
+      currentTime = timestamp;
+
+    } else if ((currentTime - timestamp) > flushFrequency) {
+
+      // significantly out-of-order timestamps
+      LOG.warn("Timestamps out-of-order by '{}' ms. This may indicate a problem in the data. last={}, current={}",
+              (currentTime - timestamp),
+              timestamp,
+              currentTime);
+    }
+
+    if(flushTime == 0) {
+
+      // set the next time to flush
+      flushTime = currentTime + flushFrequency;
+      LOG.debug("Setting flush time; flushTime={}, currentTime={}, flushFreq={}",
+              flushTime,
+              currentTime,
+              flushFrequency);
+    }
+  }
+
+  /**
+   * Returns true if it is time to flush.
+   *
+   * @return True if time to flush.  Otherwise, false.
+   */
+  @Override
+  public boolean isTimeToFlush() {
+
+    boolean flush = currentTime > flushTime;
+    LOG.debug("Flush={}, '{}' ms until flush; currentTime={}, flushTime={}",
+            flush,
+            flush ? 0 : (flushTime-currentTime),
+            currentTime,
+            flushTime);
+
+    return flush;
+  }
+
+  @Override
+  public long currentTimeMillis() {
+    return currentTime;
+  }
+}
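
For illustration, here is a minimal sketch of how this signal behaves, using only the
constructor and methods shown in the diff above.  The 15-minute frequency and the
timestamps are illustrative assumptions, not values from the patch:

    import java.util.concurrent.TimeUnit;

    public class FlushSignalExample {
      public static void main(String[] args) {
        // fires once observed event time moves past one 15-minute period
        FlushSignal signal = new FixedFrequencyFlushSignal(TimeUnit.MINUTES.toMillis(15));

        long t = 1500000000000L;
        signal.update(t);                                  // first update sets flushTime = t + 15 min
        System.out.println(signal.isTimeToFlush());        // false; event time has not advanced

        signal.update(t + TimeUnit.MINUTES.toMillis(16));  // event time advances 16 minutes
        System.out.println(signal.isTimeToFlush());        // true; currentTime > flushTime

        signal.reset();                                    // restart the countdown after flushing
      }
    }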

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/FlushSignal.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/FlushSignal.java b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/FlushSignal.java
new file mode 100644
index 0000000..0a9fc76
--- /dev/null
+++ b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/FlushSignal.java
@@ -0,0 +1,51 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.metron.profiler.bolt;
+
+/**
+ * Signals when it is time to flush a profile.
+ */
+public interface FlushSignal {
+
+  /**
+   * Returns true if it is time to flush.
+   *
+   * @return True if time to flush.  Otherwise, false.
+   */
+  boolean isTimeToFlush();
+
+  /**
+   * Update the signaller with a known timestamp.
+   *
+   * @param timestamp A timestamp expected to be epoch milliseconds
+   */
+  void update(long timestamp);
+
+  /**
+   * Reset the signaller.
+   */
+  void reset();
+
+  /**
+   * Returns the current time in epoch milliseconds.
+   * @return The current time in epoch milliseconds.
+   */
+  long currentTimeMillis();
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/HBaseDestinationHandler.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/HBaseDestinationHandler.java b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/HBaseDestinationHandler.java
deleted file mode 100644
index 4fa5dc1..0000000
--- a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/HBaseDestinationHandler.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-package org.apache.metron.profiler.bolt;
-
-import org.apache.metron.profiler.ProfileMeasurement;
-import org.apache.storm.task.OutputCollector;
-import org.apache.storm.topology.OutputFieldsDeclarer;
-import org.apache.storm.tuple.Fields;
-import org.apache.storm.tuple.Values;
-
-import java.io.Serializable;
-
-/**
- * Handles emitting a ProfileMeasurement to the stream which writes
- * profile measurements to HBase.
- */
-public class HBaseDestinationHandler implements DestinationHandler, Serializable {
-
-  /**
-   * The stream identifier used for this destination;
-   */
-  private String streamId = "hbase";
-
-  @Override
-  public void declareOutputFields(OutputFieldsDeclarer declarer) {
-    declarer.declareStream(getStreamId(), new Fields("measurement"));
-  }
-
-  @Override
-  public void emit(ProfileMeasurement measurement, OutputCollector collector) {
-    collector.emit(getStreamId(), new Values(measurement));
-  }
-
-  @Override
-  public String getStreamId() {
-    return streamId;
-  }
-
-  public void setStreamId(String streamId) {
-    this.streamId = streamId;
-  }
-}

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/HBaseEmitter.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/HBaseEmitter.java b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/HBaseEmitter.java
new file mode 100644
index 0000000..8e1229a
--- /dev/null
+++ b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/HBaseEmitter.java
@@ -0,0 +1,63 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+package org.apache.metron.profiler.bolt;
+
+import org.apache.metron.profiler.ProfileMeasurement;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Serializable;
+import java.lang.invoke.MethodHandles;
+
+/**
+ * Responsible for emitting a {@link ProfileMeasurement} to an output stream that will
+ * persist data in HBase.
+ */
+public class HBaseEmitter implements ProfileMeasurementEmitter, Serializable {
+
+  protected static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  /**
+   * The stream identifier used for this destination.
+   */
+  private String streamId = "hbase";
+
+  @Override
+  public void declareOutputFields(OutputFieldsDeclarer declarer) {
+    declarer.declareStream(getStreamId(), new Fields("measurement"));
+  }
+
+  @Override
+  public void emit(ProfileMeasurement measurement, OutputCollector collector) {
+    collector.emit(getStreamId(), new Values(measurement));
+  }
+
+  @Override
+  public String getStreamId() {
+    return streamId;
+  }
+
+  public void setStreamId(String streamId) {
+    this.streamId = streamId;
+  }
+}
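
As a rough sketch of how an emitter is attached to the topology (the withEmitter
builder method appears in the ProfileBuilderBolt diff later in this commit; the
wiring below is an assumption about topology setup code, not code from the patch):

    // measurements handed to this emitter flow out on the "hbase" stream,
    // carrying a single tuple field named "measurement"
    ProfileBuilderBolt builder = new ProfileBuilderBolt()
            .withEmitter(new HBaseEmitter());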

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/KafkaDestinationHandler.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/KafkaDestinationHandler.java b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/KafkaDestinationHandler.java
deleted file mode 100644
index be82468..0000000
--- a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/KafkaDestinationHandler.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-
-package org.apache.metron.profiler.bolt;
-
-import java.io.Serializable;
-import java.lang.invoke.MethodHandles;
-import org.apache.commons.lang3.ClassUtils;
-import org.apache.metron.profiler.ProfileMeasurement;
-import org.apache.storm.task.OutputCollector;
-import org.apache.storm.topology.OutputFieldsDeclarer;
-import org.apache.storm.tuple.Fields;
-import org.apache.storm.tuple.Values;
-import org.json.simple.JSONObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Handles emitting a ProfileMeasurement to the stream which writes
- * profile measurements to Kafka.
- */
-public class KafkaDestinationHandler implements DestinationHandler, Serializable {
-
-  protected static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  /**
-   * The stream identifier used for this destination;
-   */
-  private String streamId = "kafka";
-
-  /**
-   * The 'source.type' of messages originating from the Profiler.
-   */
-  private String sourceType = "profiler";
-
-  @Override
-  public void declareOutputFields(OutputFieldsDeclarer declarer) {
-    // the kafka writer expects a field named 'message'
-    declarer.declareStream(getStreamId(), new Fields("message"));
-  }
-
-  @Override
-  public void emit(ProfileMeasurement measurement, OutputCollector collector) {
-
-    JSONObject message = new JSONObject();
-    message.put("profile", measurement.getDefinition().getProfile());
-    message.put("entity", measurement.getEntity());
-    message.put("period", measurement.getPeriod().getPeriod());
-    message.put("period.start", measurement.getPeriod().getStartTimeMillis());
-    message.put("period.end", measurement.getPeriod().getEndTimeMillis());
-    message.put("timestamp", System.currentTimeMillis());
-    message.put("source.type", sourceType);
-    message.put("is_alert", "true");
-
-    // append each of the triage values to the message
-    measurement.getTriageValues().forEach((key, value) -> {
-
-      if(isValidType(value)) {
-        message.put(key, value);
-
-      } else {
-        LOG.error(String.format("triage expression has invalid type. expect primitive types only. skipping: profile=%s, entity=%s, expression=%s, type=%s",
-                measurement.getDefinition().getProfile(), measurement.getEntity(), key, ClassUtils.getShortClassName(value, "null")));
-      }
-    });
-
-    collector.emit(getStreamId(), new Values(message));
-  }
-
-  /**
-   * The result of a profile's triage expressions must be a string or primitive type.
-   *
-   * This ensures that the value can be easily serialized and appended to a message destined for Kafka.
-   *
-   * @param value The value of a triage expression.
-   * @return True, if the type of the value is valid.
-   */
-  private boolean isValidType(Object value) {
-    return value != null && (value instanceof String || ClassUtils.isPrimitiveOrWrapper(value.getClass()));
-  }
-
-  @Override
-  public String getStreamId() {
-    return streamId;
-  }
-
-  public void setStreamId(String streamId) {
-    this.streamId = streamId;
-  }
-
-  public void setSourceType(String sourceType) {
-    this.sourceType = sourceType;
-  }
-}

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/KafkaEmitter.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/KafkaEmitter.java b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/KafkaEmitter.java
new file mode 100644
index 0000000..29d1a49
--- /dev/null
+++ b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/KafkaEmitter.java
@@ -0,0 +1,114 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.metron.profiler.bolt;
+
+import java.io.Serializable;
+import java.lang.invoke.MethodHandles;
+import org.apache.commons.lang3.ClassUtils;
+import org.apache.metron.profiler.ProfileMeasurement;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.tuple.Fields;
+import org.apache.storm.tuple.Values;
+import org.json.simple.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Responsible for emitting a {@link ProfileMeasurement} to an output stream that will
+ * persist data in Kafka.
+ */
+public class KafkaEmitter implements ProfileMeasurementEmitter, Serializable {
+
+  protected static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  /**
+   * The stream identifier used for this destination.
+   */
+  private String streamId = "kafka";
+
+  /**
+   * The 'source.type' of messages originating from the Profiler.
+   */
+  private String sourceType = "profiler";
+
+  @Override
+  public void declareOutputFields(OutputFieldsDeclarer declarer) {
+    // the kafka writer expects a field named 'message'
+    declarer.declareStream(getStreamId(), new Fields("message"));
+  }
+
+  @Override
+  public void emit(ProfileMeasurement measurement, OutputCollector collector) {
+
+    JSONObject message = new JSONObject();
+    message.put("profile", measurement.getDefinition().getProfile());
+    message.put("entity", measurement.getEntity());
+    message.put("period", measurement.getPeriod().getPeriod());
+    message.put("period.start", measurement.getPeriod().getStartTimeMillis());
+    message.put("period.end", measurement.getPeriod().getEndTimeMillis());
+    message.put("timestamp", System.currentTimeMillis());
+    message.put("source.type", sourceType);
+    message.put("is_alert", "true");
+
+    // append each of the triage values to the message
+    measurement.getTriageValues().forEach((key, value) -> {
+
+      if(isValidType(value)) {
+        message.put(key, value);
+
+      } else {
+        LOG.error(String.format(
+                "triage expression must result in primitive type, skipping; type=%s, profile=%s, entity=%s, expr=%s",
+                ClassUtils.getShortClassName(value, "null"),
+                measurement.getDefinition().getProfile(),
+                measurement.getEntity(),
+                key));
+      }
+    });
+
+    collector.emit(getStreamId(), new Values(message));
+  }
+
+  /**
+   * The result of a profile's triage expressions must be a string or primitive type.
+   *
+   * This ensures that the value can be easily serialized and appended to a message destined for Kafka.
+   *
+   * @param value The value of a triage expression.
+   * @return True, if the type of the value is valid.
+   */
+  private boolean isValidType(Object value) {
+    return value != null && (value instanceof String || ClassUtils.isPrimitiveOrWrapper(value.getClass()));
+  }
+
+  @Override
+  public String getStreamId() {
+    return streamId;
+  }
+
+  public void setStreamId(String streamId) {
+    this.streamId = streamId;
+  }
+
+  public void setSourceType(String sourceType) {
+    this.sourceType = sourceType;
+  }
+}
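
Based on the emit() method above, a message written to the "kafka" stream would look
roughly like the following.  All field values are illustrative (assuming a 15-minute
period duration), and "max-score" stands in for an arbitrary triage value key:

    {
      "profile": "example-profile",
      "entity": "10.0.0.1",
      "period": 1666666,
      "period.start": 1499999400000,
      "period.end": 1500000300000,
      "timestamp": 1500000300123,
      "source.type": "profiler",
      "is_alert": "true",
      "max-score": 98
    }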

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ManualFlushSignal.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ManualFlushSignal.java b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ManualFlushSignal.java
new file mode 100644
index 0000000..d8e9539
--- /dev/null
+++ b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ManualFlushSignal.java
@@ -0,0 +1,54 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+package org.apache.metron.profiler.bolt;
+
+/**
+ * Signals that a flush should occur.
+ *
+ * <p>The flush signal can be turned on or off like a switch as needed.  Most useful for testing.
+ */
+public class ManualFlushSignal implements FlushSignal {
+
+  private boolean flushNow = false;
+
+  public void setFlushNow(boolean flushNow) {
+    this.flushNow = flushNow;
+  }
+
+  @Override
+  public boolean isTimeToFlush() {
+    return flushNow;
+  }
+
+  @Override
+  public void update(long timestamp) {
+    // nothing to do
+  }
+
+  @Override
+  public void reset() {
+    // nothing to do.
+  }
+
+  @Override
+  public long currentTimeMillis() {
+    // not needed
+    return 0;
+  }
+}
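
A sketch of the testing pattern this enables, using the withFlushSignal builder
method from the ProfileBuilderBolt diff below; the surrounding test harness is
assumed:

    // force a flush on the next window evaluation, independent of event time
    ManualFlushSignal flushSignal = new ManualFlushSignal();
    flushSignal.setFlushNow(true);

    ProfileBuilderBolt bolt = new ProfileBuilderBolt()
            .withFlushSignal(flushSignal);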

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileBuilderBolt.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileBuilderBolt.java b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileBuilderBolt.java
index 3c8d875..ffe823f 100644
--- a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileBuilderBolt.java
+++ b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileBuilderBolt.java
@@ -20,19 +20,36 @@
 
 package org.apache.metron.profiler.bolt;
 
-import org.apache.metron.common.bolt.ConfiguredProfilerBolt;
+import org.apache.commons.collections4.CollectionUtils;
+import org.apache.curator.RetryPolicy;
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.framework.recipes.cache.TreeCacheEvent;
+import org.apache.curator.retry.ExponentialBackoffRetry;
+import org.apache.metron.common.Constants;
+import org.apache.metron.common.configuration.ConfigurationType;
+import org.apache.metron.common.configuration.ConfigurationsUtils;
 import org.apache.metron.common.configuration.profiler.ProfileConfig;
+import org.apache.metron.common.configuration.profiler.ProfilerConfigurations;
+import org.apache.metron.common.zookeeper.configurations.ConfigurationsUpdater;
+import org.apache.metron.common.zookeeper.configurations.ProfilerUpdater;
+import org.apache.metron.common.zookeeper.configurations.Reloadable;
 import org.apache.metron.profiler.DefaultMessageDistributor;
+import org.apache.metron.profiler.MessageDistributor;
 import org.apache.metron.profiler.MessageRoute;
 import org.apache.metron.profiler.ProfileMeasurement;
 import org.apache.metron.stellar.common.utils.ConversionUtils;
 import org.apache.metron.stellar.dsl.Context;
+import org.apache.metron.zookeeper.SimpleEventListener;
+import org.apache.metron.zookeeper.ZKCache;
 import org.apache.storm.Config;
 import org.apache.storm.task.OutputCollector;
 import org.apache.storm.task.TopologyContext;
 import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseWindowedBolt;
 import org.apache.storm.tuple.Tuple;
 import org.apache.storm.utils.TupleUtils;
+import org.apache.storm.windowing.TupleWindow;
 import org.json.simple.JSONObject;
 import org.json.simple.parser.JSONParser;
 import org.slf4j.Logger;
@@ -42,42 +59,76 @@ import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
 import static java.lang.String.format;
+import static org.apache.metron.profiler.bolt.ProfileSplitterBolt.ENTITY_TUPLE_FIELD;
+import static org.apache.metron.profiler.bolt.ProfileSplitterBolt.MESSAGE_TUPLE_FIELD;
+import static org.apache.metron.profiler.bolt.ProfileSplitterBolt.PROFILE_TUPLE_FIELD;
+import static org.apache.metron.profiler.bolt.ProfileSplitterBolt.TIMESTAMP_TUPLE_FIELD;
 
 /**
- * A bolt that is responsible for building a Profile.
- *
- * This bolt maintains the state required to build a Profile.  When the window
- * period expires, the data is summarized as a ProfileMeasurement, all state is
- * flushed, and the ProfileMeasurement is emitted.
+ * A Storm bolt that is responsible for building a profile.
  *
+ * <p>This bolt maintains the state required to build a Profile.  When the window
+ * period expires, the data is summarized as a {@link ProfileMeasurement}, all state is
+ * flushed, and the {@link ProfileMeasurement} is emitted.
  */
-public class ProfileBuilderBolt extends ConfiguredProfilerBolt {
+public class ProfileBuilderBolt extends BaseWindowedBolt implements Reloadable {
 
   protected static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   private OutputCollector collector;
 
   /**
+   * The URL to connect to Zookeeper.
+   */
+  private String zookeeperUrl;
+
+  /**
+   * The Zookeeper client connection.
+   */
+  protected CuratorFramework zookeeperClient;
+
+  /**
+   * The Zookeeper cache.
+   */
+  protected ZKCache zookeeperCache;
+
+  /**
+   * Manages configuration for the Profiler.
+   */
+  private ProfilerConfigurations configurations;
+
+  /**
    * The duration of each profile period in milliseconds.
    */
   private long periodDurationMillis;
 
   /**
+   * The duration of Storm's event window.
+   */
+  private long windowDurationMillis;
+
+  /**
    * If a message has not been applied to a Profile in this number of milliseconds,
    * the Profile will be forgotten and its resources will be cleaned up.
    *
-   * WARNING: The TTL must be at least greater than the period duration.
+   * <p>WARNING: The TTL must be at least as long as the period duration.
    */
   private long profileTimeToLiveMillis;
 
   /**
+   * The maximum number of {@link MessageRoute} routes that will be maintained by
+   * this bolt.  After this value is exceeded, lesser used routes will be evicted
+   * from the internal cache.
+   */
+  private long maxNumberOfRoutes;
+
+  /**
    * Distributes messages to the profile builders.
    */
-  private DefaultMessageDistributor messageDistributor;
+  private MessageDistributor messageDistributor;
 
   /**
    * Parses JSON messages.
@@ -85,112 +136,245 @@ public class ProfileBuilderBolt extends ConfiguredProfilerBolt {
   private transient JSONParser parser;
 
   /**
-   * The measurements produced by a profile can be written to multiple destinations.  Each
-   * destination is handled by a separate `DestinationHandler`.
+   * Responsible for emitting {@link ProfileMeasurement} values.
+   *
+   * <p>The {@link ProfileMeasurement} values generated by a profile can be written to
+   * multiple endpoints like HBase or Kafka.  Each endpoint is handled by a separate
+   * {@link ProfileMeasurementEmitter}.
    */
-  private List<DestinationHandler> destinationHandlers;
+  private List<ProfileMeasurementEmitter> emitters;
 
   /**
-   * @param zookeeperUrl The Zookeeper URL that contains the configuration data.
+   * Signals when it is time to flush.
    */
-  public ProfileBuilderBolt(String zookeeperUrl) {
-    super(zookeeperUrl);
-    this.destinationHandlers = new ArrayList<>();
-  }
+  private FlushSignal flushSignal;
 
-  /**
-   * Defines the frequency at which the bolt will receive tick tuples.  Tick tuples are
-   * used to control how often a profile is flushed.
-   */
-  @Override
-  public Map<String, Object> getComponentConfiguration() {
-    // how frequently should the bolt receive tick tuples?
-    Config conf = new Config();
-    conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, TimeUnit.MILLISECONDS.toSeconds(periodDurationMillis));
-    return conf;
+  public ProfileBuilderBolt() {
+    this.emitters = new ArrayList<>();
   }
 
   @Override
   public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
     super.prepare(stormConf, context, collector);
 
+    if(periodDurationMillis <= 0) {
+      throw new IllegalArgumentException("expect 'profiler.period.duration' >= 0");
+    }
+    if(profileTimeToLiveMillis <= 0) {
+      throw new IllegalArgumentException("expect 'profiler.ttl' >= 0");
+    }
     if(profileTimeToLiveMillis < periodDurationMillis) {
-      throw new IllegalStateException(format(
-              "invalid configuration: expect profile TTL (%d) to be greater than period duration (%d)",
-              profileTimeToLiveMillis,
-              periodDurationMillis));
+      throw new IllegalArgumentException("expect 'profiler.ttl' >= 'profiler.period.duration'");
     }
+    if(maxNumberOfRoutes <= 0) {
+      throw new IllegalArgumentException("expect 'profiler.max.routes.per.bolt' > 0");
+    }
+    if(windowDurationMillis <= 0) {
+      throw new IllegalArgumentException("expect 'profiler.window.duration' > 0");
+    }
+    if(windowDurationMillis > periodDurationMillis) {
+      throw new IllegalArgumentException("expect 'profiler.period.duration' >= 'profiler.window.duration'");
+    }
+    if(periodDurationMillis % windowDurationMillis != 0) {
+      throw new IllegalArgumentException("expect 'profiler.period.duration' % 'profiler.window.duration' == 0");
+    }
+
     this.collector = collector;
     this.parser = new JSONParser();
-    this.messageDistributor = new DefaultMessageDistributor(periodDurationMillis, profileTimeToLiveMillis);
+    this.messageDistributor = new DefaultMessageDistributor(periodDurationMillis, profileTimeToLiveMillis, maxNumberOfRoutes);
+    this.configurations = new ProfilerConfigurations();
+    this.flushSignal = new FixedFrequencyFlushSignal(periodDurationMillis);
+    setupZookeeper();
+  }
+
+  @Override
+  public void cleanup() {
+    zookeeperCache.close();
+    zookeeperClient.close();
+  }
+
+  private void setupZookeeper() {
+    try {
+      if (zookeeperClient == null) {
+        RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
+        zookeeperClient = CuratorFrameworkFactory.newClient(zookeeperUrl, retryPolicy);
+      }
+      zookeeperClient.start();
+
+      // this is temporary to ensure that any validation passes. the individual bolt
+      // will reinitialize stellar to dynamically pull from zookeeper.
+      ConfigurationsUtils.setupStellarStatically(zookeeperClient);
+      if (zookeeperCache == null) {
+        ConfigurationsUpdater<ProfilerConfigurations> updater = createUpdater();
+        SimpleEventListener listener = new SimpleEventListener.Builder()
+                .with( updater::update, TreeCacheEvent.Type.NODE_ADDED, TreeCacheEvent.Type.NODE_UPDATED)
+                .with( updater::delete, TreeCacheEvent.Type.NODE_REMOVED)
+                .build();
+        zookeeperCache = new ZKCache.Builder()
+                .withClient(zookeeperClient)
+                .withListener(listener)
+                .withRoot(Constants.ZOOKEEPER_TOPOLOGY_ROOT)
+                .build();
+        updater.forceUpdate(zookeeperClient);
+        zookeeperCache.start();
+      }
+    } catch (Exception e) {
+      LOG.error(e.getMessage(), e);
+      throw new RuntimeException(e);
+    }
+  }
+
+  protected ConfigurationsUpdater<ProfilerConfigurations> createUpdater() {
+    return new ProfilerUpdater(this, this::getConfigurations);
+  }
+
+  public ProfilerConfigurations getConfigurations() {
+    return configurations;
+  }
+
+  @Override
+  public void reloadCallback(String name, ConfigurationType type) {
+    // nothing to do
   }
 
   @Override
   public void declareOutputFields(OutputFieldsDeclarer declarer) {
-    if(destinationHandlers.size() == 0) {
+
+    if(emitters.size() == 0) {
       throw new IllegalStateException("At least one destination handler must be defined.");
     }
 
-    // each destination will define its own stream
-    destinationHandlers.forEach(dest -> dest.declareOutputFields(declarer));
+    // allow each emitter to define its own stream
+    emitters.forEach(emitter -> emitter.declareOutputFields(declarer));
+  }
+
+  /**
+   * Defines the frequency at which the bolt will receive tick tuples.  Tick tuples are
+   * used to control how often a profile is flushed.
+   */
+  @Override
+  public Map<String, Object> getComponentConfiguration() {
+
+    Map<String, Object> conf = super.getComponentConfiguration();
+    conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, TimeUnit.MILLISECONDS.toSeconds(profileTimeToLiveMillis));
+    return conf;
   }
 
   private Context getStellarContext() {
+
     Map<String, Object> global = getConfigurations().getGlobalConfig();
     return new Context.Builder()
-            .with(Context.Capabilities.ZOOKEEPER_CLIENT, () -> client)
+            .with(Context.Capabilities.ZOOKEEPER_CLIENT, () -> zookeeperClient)
             .with(Context.Capabilities.GLOBAL_CONFIG, () -> global)
             .with(Context.Capabilities.STELLAR_CONFIG, () -> global)
             .build();
   }
 
-  /**
-   * Expect to receive either a tick tuple or a telemetry message that needs applied
-   * to a profile.
-   * @param input The tuple.
-   */
   @Override
-  public void execute(Tuple input) {
+  public void execute(TupleWindow window) {
+
+    LOG.debug("Tuple window contains {} tuple(s), {} expired, {} new",
+            CollectionUtils.size(window.get()),
+            CollectionUtils.size(window.getExpired()),
+            CollectionUtils.size(window.getNew()));
+
     try {
-      if(TupleUtils.isTick(input)) {
-        handleTick();
 
-      } else {
-        handleMessage(input);
+      // handle each tuple in the window
+      for(Tuple tuple : window.get()) {
+
+        if(TupleUtils.isTick(tuple)) {
+          handleTick();
+
+        } else {
+          handleMessage(tuple);
+        }
+      }
+
+      // time to flush?
+      if(flushSignal.isTimeToFlush()) {
+        flushSignal.reset();
+
+        // flush the active profiles
+        List<ProfileMeasurement> measurements = messageDistributor.flush();
+        emitMeasurements(measurements);
+
+        LOG.debug("Flushed active profiles and found {} measurement(s).", measurements.size());
       }
 
     } catch (Throwable e) {
-      LOG.error(format("Unexpected failure: message='%s', tuple='%s'", e.getMessage(), input), e);
-      collector.reportError(e);
 
-    } finally {
-      collector.ack(input);
+      LOG.error("Unexpected error", e);
+      collector.reportError(e);
     }
   }
 
   /**
-   * Handles a telemetry message
-   * @param input The tuple.
+   * Flush all expired profiles when a 'tick' is received.
+   *
+   * If a profile has not received a message for an extended period of time then it is
+   * marked as expired.  Periodically we need to flush these expired profiles to ensure
+   * that their state is not lost.
    */
-  private void handleMessage(Tuple input) throws ExecutionException {
-    JSONObject message = getField("message", input, JSONObject.class);
-    ProfileConfig definition = getField("profile", input, ProfileConfig.class);
-    String entity = getField("entity", input, String.class);
-    MessageRoute route = new MessageRoute(definition, entity);
+  private void handleTick() {
+
+    // flush the expired profiles
+    List<ProfileMeasurement> measurements = messageDistributor.flushExpired();
+    emitMeasurements(measurements);
 
-    messageDistributor.distribute(message, route, getStellarContext());
+    LOG.debug("Flushed expired profiles and found {} measurement(s).", measurements.size());
   }
 
   /**
-   * Handles a tick tuple.
+   * Handles the processing of a single tuple.
+   *
+   * @param input The tuple containing a telemetry message.
    */
-  private void handleTick() {
-    List<ProfileMeasurement> measurements = messageDistributor.flush();
+  private void handleMessage(Tuple input) {
+
+    // crack open the tuple
+    JSONObject message = getField(MESSAGE_TUPLE_FIELD, input, JSONObject.class);
+    ProfileConfig definition = getField(PROFILE_TUPLE_FIELD, input, ProfileConfig.class);
+    String entity = getField(ENTITY_TUPLE_FIELD, input, String.class);
+    Long timestamp = getField(TIMESTAMP_TUPLE_FIELD, input, Long.class);
+
+    // keep track of time
+    flushSignal.update(timestamp);
+    
+    // distribute the message
+    MessageRoute route = new MessageRoute(definition, entity);
+    messageDistributor.distribute(message, timestamp, route, getStellarContext());
 
-    // forward the measurements to each destination handler
-    for(ProfileMeasurement m : measurements ) {
-      destinationHandlers.forEach(handler -> handler.emit(m, collector));
+    LOG.debug("Message distributed: profile={}, entity={}, timestamp={}", definition.getProfile(), entity, timestamp);
+  }
+
+  /**
+   * Handles the {@code ProfileMeasurement}s that are created when a profile is flushed.
+   *
+   * @param measurements The measurements to handle.
+   */
+  private void emitMeasurements(List<ProfileMeasurement> measurements) {
+
+    // flush each profile
+    for(ProfileMeasurement measurement: measurements) {
+
+      // allow each 'emitter' to emit the measurement
+      for (ProfileMeasurementEmitter emitter : emitters) {
+        emitter.emit(measurement, collector);
+
+        LOG.debug("Measurement emitted; stream={}, profile={}, entity={}, value={}, start={}, end={}, duration={}, period={}",
+                emitter.getStreamId(),
+                measurement.getProfileName(),
+                measurement.getEntity(),
+                measurement.getProfileValue(),
+                measurement.getPeriod().getStartTimeMillis(),
+                measurement.getPeriod().getEndTimeMillis(),
+                measurement.getPeriod().getDurationMillis(),
+                measurement.getPeriod().getPeriod());
+      }
     }
+
+    LOG.debug("Emitted {} measurement(s).", measurements.size());
   }
 
   /**
@@ -202,14 +386,27 @@ public class ProfileBuilderBolt extends ConfiguredProfilerBolt {
    * @param <T> The type of the field value.
    */
   private <T> T getField(String fieldName, Tuple tuple, Class<T> clazz) {
+
     T value = ConversionUtils.convert(tuple.getValueByField(fieldName), clazz);
     if(value == null) {
-      throw new IllegalStateException(format("invalid tuple received: missing or invalid field '%s'", fieldName));
+      throw new IllegalStateException(format("Invalid tuple: missing or invalid field '%s'", fieldName));
     }
 
     return value;
   }
 
+  @Override
+  public BaseWindowedBolt withTumblingWindow(BaseWindowedBolt.Duration duration) {
+
+    // need to capture the window duration for setting the flush countdown
+    this.windowDurationMillis = duration.value;
+    return super.withTumblingWindow(duration);
+  }
+
+  public long getPeriodDurationMillis() {
+    return periodDurationMillis;
+  }
+
   public ProfileBuilderBolt withPeriodDurationMillis(long periodDurationMillis) {
     this.periodDurationMillis = periodDurationMillis;
     return this;
@@ -224,16 +421,55 @@ public class ProfileBuilderBolt extends ConfiguredProfilerBolt {
     return this;
   }
 
+  public long getWindowDurationMillis() {
+    return windowDurationMillis;
+  }
+
   public ProfileBuilderBolt withProfileTimeToLive(int duration, TimeUnit units) {
     return withProfileTimeToLiveMillis(units.toMillis(duration));
   }
 
-  public ProfileBuilderBolt withDestinationHandler(DestinationHandler handler) {
-    this.destinationHandlers.add(handler);
+  public ProfileBuilderBolt withEmitter(ProfileMeasurementEmitter emitter) {
+    this.emitters.add(emitter);
     return this;
   }
 
-  public DefaultMessageDistributor getMessageDistributor() {
+  public MessageDistributor getMessageDistributor() {
     return messageDistributor;
   }
+
+  public ProfileBuilderBolt withZookeeperUrl(String zookeeperUrl) {
+    this.zookeeperUrl = zookeeperUrl;
+    return this;
+  }
+
+  public ProfileBuilderBolt withZookeeperClient(CuratorFramework zookeeperClient) {
+    this.zookeeperClient = zookeeperClient;
+    return this;
+  }
+
+  public ProfileBuilderBolt withZookeeperCache(ZKCache zookeeperCache) {
+    this.zookeeperCache = zookeeperCache;
+    return this;
+  }
+
+  public ProfileBuilderBolt withProfilerConfigurations(ProfilerConfigurations configurations) {
+    this.configurations = configurations;
+    return this;
+  }
+
+  public ProfileBuilderBolt withMaxNumberOfRoutes(long maxNumberOfRoutes) {
+    this.maxNumberOfRoutes = maxNumberOfRoutes;
+    return this;
+  }
+
+  public ProfileBuilderBolt withFlushSignal(FlushSignal flushSignal) {
+    this.flushSignal = flushSignal;
+    return this;
+  }
+
+  public ProfileBuilderBolt withMessageDistributor(MessageDistributor messageDistributor) {
+    this.messageDistributor = messageDistributor;
+    return this;
+  }
 }

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileMeasurementEmitter.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileMeasurementEmitter.java b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileMeasurementEmitter.java
new file mode 100644
index 0000000..e1fe4e1
--- /dev/null
+++ b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileMeasurementEmitter.java
@@ -0,0 +1,59 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+package org.apache.metron.profiler.bolt;
+
+import org.apache.metron.profiler.ProfileMeasurement;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+
+/**
+ * Handles the mechanics of emitting a {@link ProfileMeasurement} to an output
+ * stream.
+ *
+ * <p>The Profiler allows the measurements produced by a profile to be written to
+ * multiple endpoints such as HBase and Kafka.  Each of these endpoints will have
+ * a unique stream that the measurements are written to.
+ *
+ * <p>Implementors of this interface are responsible for defining and managing the
+ * output stream for a specific endpoint.
+ */
+public interface ProfileMeasurementEmitter {
+
+  /**
+   * Each destination leverages a unique stream.  This method defines
+   * the unique stream identifier.
+   *
+   * The stream identifier must also be declared within the topology
+   * definition.
+   */
+  String getStreamId();
+
+  /**
+   * Declares the output fields for the stream.
+   * @param declarer The declarer used to define the stream's output fields.
+   */
+  void declareOutputFields(OutputFieldsDeclarer declarer);
+
+  /**
+   * Emit the measurement.
+   * @param measurement The measurement to emit.
+   * @param collector The output collector.
+   */
+  void emit(ProfileMeasurement measurement, OutputCollector collector);
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileSplitterBolt.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileSplitterBolt.java b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileSplitterBolt.java
index a453c66..4e62eee 100644
--- a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileSplitterBolt.java
+++ b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileSplitterBolt.java
@@ -21,10 +21,14 @@
 package org.apache.metron.profiler.bolt;
 
 import org.apache.metron.common.bolt.ConfiguredProfilerBolt;
+import org.apache.metron.common.configuration.profiler.ProfileConfig;
 import org.apache.metron.common.configuration.profiler.ProfilerConfig;
-import org.apache.metron.profiler.MessageRouter;
-import org.apache.metron.profiler.MessageRoute;
 import org.apache.metron.profiler.DefaultMessageRouter;
+import org.apache.metron.profiler.MessageRoute;
+import org.apache.metron.profiler.MessageRouter;
+import org.apache.metron.profiler.clock.Clock;
+import org.apache.metron.profiler.clock.ClockFactory;
+import org.apache.metron.profiler.clock.DefaultClockFactory;
 import org.apache.metron.stellar.dsl.Context;
 import org.apache.storm.task.OutputCollector;
 import org.apache.storm.task.TopologyContext;
@@ -42,16 +46,45 @@ import java.io.UnsupportedEncodingException;
 import java.lang.invoke.MethodHandles;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 
 /**
- * The bolt responsible for filtering incoming messages and directing
- * each to the one or more bolts responsible for building a Profile.  Each
- * message may be needed by 0, 1 or even many Profiles.
+ * The Storm bolt responsible for filtering incoming messages and directing
+ * each to the downstream bolts responsible for building a Profile.
  */
 public class ProfileSplitterBolt extends ConfiguredProfilerBolt {
 
   protected static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
+  /**
+   * The name of the tuple field containing the entity.
+   *
+   * This is the result of executing a profile's 'entity' Stellar expression within
+   * the context of the telemetry message.
+   */
+  protected static final String ENTITY_TUPLE_FIELD = "entity";
+
+  /**
+   * The name of the tuple field containing the profile definition.
+   */
+  protected static final String PROFILE_TUPLE_FIELD = "profile";
+
+  /**
+   * The name of the tuple field containing the telemetry message.
+   */
+  protected static final String MESSAGE_TUPLE_FIELD = "message";
+
+  /**
+   * The name of the tuple field containing the timestamp of the telemetry message.
+   *
+   * <p>If a 'timestampField' has been configured, the timestamp was extracted
+   * from a field within the telemetry message.  This enables event time processing.
+   *
+   * <p>If a 'timestampField' has not been configured, then the Profiler uses
+   * processing time and the timestamp originated from the system clock.
+   */
+  protected static final String TIMESTAMP_TUPLE_FIELD = "timestamp";
+
   private OutputCollector collector;
 
   /**
@@ -62,7 +95,12 @@ public class ProfileSplitterBolt extends ConfiguredProfilerBolt {
   /**
    * The router responsible for routing incoming messages.
    */
-  private MessageRouter router;
+  private transient MessageRouter router;
+
+  /**
+   * Responsible for creating the {@link Clock}.
+   */
+  private transient ClockFactory clockFactory;
 
   /**
    * @param zookeeperUrl The Zookeeper URL that contains the configuration for this bolt.
@@ -77,6 +115,7 @@ public class ProfileSplitterBolt extends ConfiguredProfilerBolt {
     this.collector = collector;
     this.parser = new JSONParser();
     this.router = new DefaultMessageRouter(getStellarContext());
+    this.clockFactory = new DefaultClockFactory();
   }
 
   private Context getStellarContext() {
@@ -88,13 +127,26 @@ public class ProfileSplitterBolt extends ConfiguredProfilerBolt {
             .build();
   }
 
+  /**
+   * This bolt consumes telemetry messages and determines if the message is needed
+   * by any of the profiles.  The message is then routed to one or more downstream
+   * bolts that are responsible for building each profile.
+   *
+   * <p>The outgoing tuples are timestamped so that Storm's window and event-time
+   * processing functionality can recognize the time of each message.
+   *
+   * <p>The timestamp that is attached to each outgoing tuple is what decides if
+   * the Profiler is operating on processing time or event time.
+   *
+   * @param input The tuple.
+   */
   @Override
   public void execute(Tuple input) {
     try {
       doExecute(input);
 
     } catch (IllegalArgumentException | ParseException | UnsupportedEncodingException e) {
-      LOG.error("Unexpected failure: message='{}', tuple='{}'", e.getMessage(), input, e);
+      LOG.error("Unexpected error", e);
       collector.reportError(e);
 
     } finally {
@@ -103,41 +155,85 @@ public class ProfileSplitterBolt extends ConfiguredProfilerBolt {
   }
 
   private void doExecute(Tuple input) throws ParseException, UnsupportedEncodingException {
+
     // retrieve the input message
     byte[] data = input.getBinary(0);
     JSONObject message = (JSONObject) parser.parse(new String(data, "UTF8"));
 
     // ensure there is a valid profiler configuration
     ProfilerConfig config = getProfilerConfig();
-    if(config != null) {
+    if(config != null && config.getProfiles().size() > 0) {
+
+      // what time is it?
+      Clock clock = clockFactory.createClock(config);
+      Optional<Long> timestamp = clock.currentTimeMillis(message);
 
-      // emit a message for each 'route'
-      List<MessageRoute> routes = router.route(message, config, getStellarContext());
-      for(MessageRoute route : routes) {
-        collector.emit(input, new Values(route.getEntity(), route.getProfileDefinition(), message));
-      }
+      // route the message.  if a message does not contain the timestamp field, it cannot be routed.
+      timestamp.ifPresent(ts -> routeMessage(input, message, config, ts));
 
     } else {
-      LOG.warn("No Profiler configuration found.  Nothing to do.");
+      LOG.debug("No Profiler configuration found.  Nothing to do.");
     }
   }
 
   /**
+   * Route a message based on the Profiler configuration.
+   * @param input The input tuple on which to anchor.
+   * @param message The telemetry message.
+   * @param config The Profiler configuration.
+   * @param timestamp The timestamp of the telemetry message.
+   */
+  private void routeMessage(Tuple input, JSONObject message, ProfilerConfig config, Long timestamp) {
+
+    // emit a tuple for each 'route'
+    List<MessageRoute> routes = router.route(message, config, getStellarContext());
+    for (MessageRoute route : routes) {
+
+      Values values = createValues(message, timestamp, route);
+      collector.emit(input, values);
+    }
+
+    LOG.debug("Found {} route(s) for message with timestamp={}", routes.size(), timestamp);
+  }
+
+  /**
    * Each emitted tuple contains the following fields.
    * <p>
    * <ol>
-   * <li> entity - The name of the entity.  The actual result of executing the Stellar expression.
-   * <li> profile - The profile definition that the message needs applied to.
-   * <li> message - The message containing JSON-formatted data that needs applied to a profile.
+   * <li>message - The message containing JSON-formatted data that needs applied to a profile.
+   * <li>timestamp - The timestamp of the message.
+   * <li>entity - The name of the entity.  The actual result of executing the Stellar expression.
+   * <li>profile - The profile definition that the message needs applied to.
    * </ol>
    * <p>
    */
   @Override
   public void declareOutputFields(OutputFieldsDeclarer declarer) {
-    declarer.declare(new Fields("entity", "profile", "message"));
+
+    // the order here must match 'createValues'
+    Fields fields = new Fields(MESSAGE_TUPLE_FIELD, TIMESTAMP_TUPLE_FIELD, ENTITY_TUPLE_FIELD, PROFILE_TUPLE_FIELD);
+    declarer.declare(fields);
+  }
+
+  /**
+   * Creates the {@link Values} attached to the outgoing tuple.
+   *
+   * @param message The telemetry message.
+   * @param timestamp The timestamp of the message.
+   * @param route The route the message must take.
+   * @return The {@link Values} attached to the outgoing tuple.
+   */
+  private Values createValues(JSONObject message, Long timestamp, MessageRoute route) {
+
+    // the order here must match `declareOutputFields`
+    return new Values(message, timestamp, route.getEntity(), route.getProfileDefinition());
   }
 
   protected MessageRouter getMessageRouter() {
     return router;
   }
+
+  public void setClockFactory(ClockFactory clockFactory) {
+    this.clockFactory = clockFactory;
+  }
 }

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/test/config/zookeeper/event-time-test/profiler.json
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/test/config/zookeeper/event-time-test/profiler.json b/metron-analytics/metron-profiler/src/test/config/zookeeper/event-time-test/profiler.json
new file mode 100644
index 0000000..9d727a3
--- /dev/null
+++ b/metron-analytics/metron-profiler/src/test/config/zookeeper/event-time-test/profiler.json
@@ -0,0 +1,12 @@
+{
+  "profiles": [
+    {
+      "profile": "event-time-test",
+      "foreach": "ip_src_addr",
+      "init":   { "counter": "0" },
+      "update": { "counter": "counter + 1" },
+      "result": "counter"
+    }
+  ],
+  "timestampField": "timestamp"
+}
\ No newline at end of file


[16/50] [abbrv] metron git commit: METRON-1497 Rest endpoint '/api/v1/search/search' needs to handle null when elastic search response return null for getAggregations (MohanDV via justinleet) closes apache/metron#968

Posted by rm...@apache.org.
METRON-1497 Rest endpoint '/api/v1/search/search' needs to handle null when elastic search response return null for getAggregations (MohanDV via justinleet) closes apache/metron#968


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/19b237de
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/19b237de
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/19b237de

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 19b237de27eed37ee87eca6d8e2198083fdb88d7
Parents: 9e95d4b
Author: MohanDV <mo...@gmail.com>
Authored: Tue Apr 3 10:25:52 2018 -0400
Committer: leet <le...@apache.org>
Committed: Tue Apr 3 10:25:52 2018 -0400

----------------------------------------------------------------------
 .../apache/metron/elasticsearch/dao/ElasticsearchDao.java | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/19b237de/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchDao.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchDao.java b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchDao.java
index 9bb109d..26e5731 100644
--- a/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchDao.java
+++ b/metron-platform/metron-elasticsearch/src/main/java/org/apache/metron/elasticsearch/dao/ElasticsearchDao.java
@@ -608,10 +608,12 @@ public class ElasticsearchDao implements IndexDao {
     Map<String, Map<String, Long>> fieldCounts = new HashMap<>();
     for (String field: fields) {
       Map<String, Long> valueCounts = new HashMap<>();
-      Aggregation aggregation = aggregations.get(getFacetAggregationName(field));
-      if (aggregation instanceof Terms) {
-        Terms terms = (Terms) aggregation;
-        terms.getBuckets().stream().forEach(bucket -> valueCounts.put(formatKey(bucket.getKey(), commonColumnMetadata.get(field)), bucket.getDocCount()));
+      if(aggregations != null ){
+        Aggregation aggregation = aggregations.get(getFacetAggregationName(field));
+        if (aggregation instanceof Terms) {
+          Terms terms = (Terms) aggregation;
+          terms.getBuckets().stream().forEach(bucket -> valueCounts.put(formatKey(bucket.getKey(), commonColumnMetadata.get(field)), bucket.getDocCount()));
+        }
       }
       fieldCounts.put(field, valueCounts);
     }


[47/50] [abbrv] metron git commit: METRON-1520: Add caching for stellar field transformations closes apache/incubator-metron#990

Posted by rm...@apache.org.
METRON-1520: Add caching for stellar field transformations closes apache/incubator-metron#990


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/1c5435cc
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/1c5435cc
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/1c5435cc

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 1c5435ccbe96c03e59c6a18d681da43561769dba
Parents: 37e3fd3
Author: cstella <ce...@gmail.com>
Authored: Wed Apr 25 11:48:44 2018 -0400
Committer: cstella <ce...@gmail.com>
Committed: Wed Apr 25 11:48:44 2018 -0400

----------------------------------------------------------------------
 metron-platform/Performance-tuning-guide.md     |  13 ++
 .../configuration/SensorParserConfig.java       |  15 ++
 .../transformation/StellarTransformation.java   |   3 +-
 .../StellarTransformationTest.java              |  30 ++++
 metron-platform/metron-parsers/README.md        |  13 ++
 .../apache/metron/parsers/bolt/ParserBolt.java  |  15 +-
 .../stellar/common/CachingStellarProcessor.java | 144 +++++++++++++++++++
 .../org/apache/metron/stellar/dsl/Context.java  |  43 +++++-
 .../common/CachingStellarProcessorTest.java     | 104 ++++++++++++++
 9 files changed, 371 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/1c5435cc/metron-platform/Performance-tuning-guide.md
----------------------------------------------------------------------
diff --git a/metron-platform/Performance-tuning-guide.md b/metron-platform/Performance-tuning-guide.md
index e2d1ae2..c2d19d6 100644
--- a/metron-platform/Performance-tuning-guide.md
+++ b/metron-platform/Performance-tuning-guide.md
@@ -60,6 +60,19 @@ parallelism will leave you with idle consumers since Kafka limits the max number
 important because Kafka has certain ordering guarantees for message delivery per partition that would not be possible if more than
 one consumer in a given consumer group were able to read from that partition.
 
+## Sensor Topology Tuning Suggestions
+
+If you are using stellar field transformations in your sensors, by default, stellar expressions
+are not cached.  Sensors that use stellar field transformations may see a performance
+boost by turning on caching via the `cacheConfig`
+[property](metron-parsers#parser_configuration).
+This is beneficial if your transformations:
+
+* Are complex (e.g. `ENRICHMENT_GET` calls or other high latency calls)
+* Always yield the same results for the same inputs (caching is either off or applied to all transformations)
+  * If any of your transformations are non-deterministic, caching should not be used, as it is likely to return incorrect results.
+
+
 ## Component Tuning Levers
 
 ### High Level Overview
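
To make the tuning guidance above concrete, here is a minimal sketch of a sensor parser config that enables the cache for a deterministic stellar field transformation (the input fields `host` and `domain` and the output field `full_hostname` are illustrative):

```
{
  "fieldTransformations" : [
    {
      "transformation" : "STELLAR",
      "output" : [ "full_hostname" ],
      "config" : {
        "full_hostname" : "TO_LOWER(JOIN([host, domain], '.'))"
      }
    }
  ],
  "cacheConfig" : {
    "stellar.cache.maxSize" : 20000,
    "stellar.cache.maxTimeRetain" : 20
  }
}
```

The expression here is deterministic, so caching is safe for this sensor.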

http://git-wip-us.apache.org/repos/asf/metron/blob/1c5435cc/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/SensorParserConfig.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/SensorParserConfig.java b/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/SensorParserConfig.java
index 2d0ccd8..d347481 100644
--- a/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/SensorParserConfig.java
+++ b/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/SensorParserConfig.java
@@ -45,11 +45,26 @@ public class SensorParserConfig implements Serializable {
   private Integer parserNumTasks = 1;
   private Integer errorWriterParallelism = 1;
   private Integer errorWriterNumTasks = 1;
+  private Map<String, Object> cacheConfig = new HashMap<>();
   private Map<String, Object> spoutConfig = new HashMap<>();
   private String securityProtocol = null;
   private Map<String, Object> stormConfig = new HashMap<>();
 
   /**
+   * Cache config for stellar field transformations.
+   * * stellar.cache.maxSize - The maximum number of elements in the cache.
+   * * stellar.cache.maxTimeRetain - The maximum amount of time an element is kept in the cache (in minutes).
+   * @return The cache config for stellar field transformations.
+   */
+  public Map<String, Object> getCacheConfig() {
+    return cacheConfig;
+  }
+
+  public void setCacheConfig(Map<String, Object> cacheConfig) {
+    this.cacheConfig = cacheConfig;
+  }
+
+  /**
    * Return the number of workers for the topology.  This property will be used for the parser unless overridden on the CLI.
    * @return
    */
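
As a quick illustration of the new accessors (a sketch only; the map keys mirror the constants documented in the javadoc above):

```
import java.util.HashMap;
import java.util.Map;
import org.apache.metron.common.configuration.SensorParserConfig;

public class CacheConfigSketch {
  public static void main(String[] args) {
    // Both values must be positive for a cache to actually be created
    // downstream; otherwise caching stays off.
    Map<String, Object> cacheConfig = new HashMap<>();
    cacheConfig.put("stellar.cache.maxSize", 20000);
    cacheConfig.put("stellar.cache.maxTimeRetain", 20);

    SensorParserConfig config = new SensorParserConfig();
    config.setCacheConfig(cacheConfig);
    System.out.println(config.getCacheConfig());
  }
}
```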

http://git-wip-us.apache.org/repos/asf/metron/blob/1c5435cc/metron-platform/metron-common/src/main/java/org/apache/metron/common/field/transformation/StellarTransformation.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/field/transformation/StellarTransformation.java b/metron-platform/metron-common/src/main/java/org/apache/metron/common/field/transformation/StellarTransformation.java
index 2a22e21..bb7501d 100644
--- a/metron-platform/metron-common/src/main/java/org/apache/metron/common/field/transformation/StellarTransformation.java
+++ b/metron-platform/metron-common/src/main/java/org/apache/metron/common/field/transformation/StellarTransformation.java
@@ -18,6 +18,7 @@
 
 package org.apache.metron.common.field.transformation;
 
+import org.apache.metron.stellar.common.CachingStellarProcessor;
 import org.apache.metron.stellar.dsl.Context;
 import org.apache.metron.stellar.dsl.MapVariableResolver;
 import org.apache.metron.stellar.dsl.StellarFunctions;
@@ -40,7 +41,7 @@ public class StellarTransformation implements FieldTransformation {
     Set<String> outputs = new HashSet<>(outputField);
     MapVariableResolver resolver = new MapVariableResolver(ret, intermediateVariables, input);
     resolver.add(sensorConfig);
-    StellarProcessor processor = new StellarProcessor();
+    StellarProcessor processor = new CachingStellarProcessor();
     for(Map.Entry<String, Object> kv : fieldMappingConfig.entrySet()) {
       String oField = kv.getKey();
       Object transformObj = kv.getValue();

http://git-wip-us.apache.org/repos/asf/metron/blob/1c5435cc/metron-platform/metron-common/src/test/java/org/apache/metron/common/field/transformation/StellarTransformationTest.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/test/java/org/apache/metron/common/field/transformation/StellarTransformationTest.java b/metron-platform/metron-common/src/test/java/org/apache/metron/common/field/transformation/StellarTransformationTest.java
index 0a3cbb0..fc91844 100644
--- a/metron-platform/metron-common/src/test/java/org/apache/metron/common/field/transformation/StellarTransformationTest.java
+++ b/metron-platform/metron-common/src/test/java/org/apache/metron/common/field/transformation/StellarTransformationTest.java
@@ -18,19 +18,49 @@
 
 package org.apache.metron.common.field.transformation;
 
+import com.github.benmanes.caffeine.cache.Cache;
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterables;
 import org.adrianwalker.multilinestring.Multiline;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.metron.common.configuration.FieldTransformer;
 import org.apache.metron.common.configuration.SensorParserConfig;
+import org.apache.metron.stellar.common.CachingStellarProcessor;
 import org.apache.metron.stellar.dsl.Context;
 import org.json.simple.JSONObject;
 import org.junit.Assert;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 
+@RunWith(Parameterized.class)
 public class StellarTransformationTest {
+  Context context;
+  public StellarTransformationTest(Cache<CachingStellarProcessor.Key, Object> cache) {
+    if(cache == null) {
+      context = Context.EMPTY_CONTEXT();
+    }
+    else {
+      context = new Context.Builder().with(Context.Capabilities.CACHE, () -> cache).build();
+    }
+  }
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    return Arrays.asList(
+            new Object[][] {
+                     { CachingStellarProcessor.createCache(ImmutableMap.of(CachingStellarProcessor.MAX_CACHE_SIZE_PARAM, 10)) }
+                   , { CachingStellarProcessor.createCache(ImmutableMap.of(CachingStellarProcessor.MAX_CACHE_SIZE_PARAM, 1)) }
+                   , { CachingStellarProcessor.createCache(ImmutableMap.of(CachingStellarProcessor.MAX_CACHE_SIZE_PARAM, 0)) }
+                   , { null }
+                           }
+                        );
+  }
+
   /**
    {
     "fieldTransformations" : [

http://git-wip-us.apache.org/repos/asf/metron/blob/1c5435cc/metron-platform/metron-parsers/README.md
----------------------------------------------------------------------
diff --git a/metron-platform/metron-parsers/README.md b/metron-platform/metron-parsers/README.md
index 6b9d62e..1d2d834 100644
--- a/metron-platform/metron-parsers/README.md
+++ b/metron-platform/metron-parsers/README.md
@@ -174,6 +174,19 @@ then it is assumed to be a regex and will match any topic matching the pattern (
 * `spoutConfig` : A map representing a custom spout config (this is a map). This can be overridden on the command line.
 * `securityProtocol` : The security protocol to use for reading from kafka (this is a string).  This can be overridden on the command line and also specified in the spout config via the `security.protocol` key.  If both are specified, then they are merged and the CLI will take precedence.
 * `stormConfig` : The storm config to use (this is a map).  This can be overridden on the command line.  If both are specified, they are merged with CLI properties taking precedence.
+* `cacheConfig` : Cache config for stellar field transformations.  This configures a least-frequently-used cache.  This is a map with the following keys.  If not explicitly configured (the default), then no cache will be used.
+  * `stellar.cache.maxSize` - The maximum number of elements in the cache. Default is to not use a cache.
+  * `stellar.cache.maxTimeRetain` - The maximum amount of time an element is kept in the cache (in minutes). Default is to not use a cache.
+
+  Example of a cache config that holds at most `20000` stellar expressions for at most `20` minutes:
+```
+{
+  "cacheConfig" : {
+    "stellar.cache.maxSize" : 20000,
+    "stellar.cache.maxTimeRetain" : 20
+  }
+}
+```
 
 The `fieldTransformations` is a complex object which defines a
 transformation which can be done to a message.  This transformation can 

http://git-wip-us.apache.org/repos/asf/metron/blob/1c5435cc/metron-platform/metron-parsers/src/main/java/org/apache/metron/parsers/bolt/ParserBolt.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-parsers/src/main/java/org/apache/metron/parsers/bolt/ParserBolt.java b/metron-platform/metron-parsers/src/main/java/org/apache/metron/parsers/bolt/ParserBolt.java
index e996f14..dd59355 100644
--- a/metron-platform/metron-parsers/src/main/java/org/apache/metron/parsers/bolt/ParserBolt.java
+++ b/metron-platform/metron-parsers/src/main/java/org/apache/metron/parsers/bolt/ParserBolt.java
@@ -31,6 +31,8 @@ import java.util.Optional;
 import java.util.Set;
 import java.util.UUID;
 import java.util.stream.Collectors;
+
+import com.github.benmanes.caffeine.cache.Cache;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.metron.common.Constants;
 import org.apache.metron.common.bolt.ConfiguredParserBolt;
@@ -45,6 +47,7 @@ import org.apache.metron.common.utils.JSONUtils;
 import org.apache.metron.parsers.filters.Filters;
 import org.apache.metron.parsers.interfaces.MessageFilter;
 import org.apache.metron.parsers.interfaces.MessageParser;
+import org.apache.metron.stellar.common.CachingStellarProcessor;
 import org.apache.metron.stellar.dsl.Context;
 import org.apache.metron.stellar.dsl.StellarFunctions;
 import org.apache.storm.task.OutputCollector;
@@ -67,6 +70,7 @@ public class ParserBolt extends ConfiguredParserBolt implements Serializable {
   private WriterHandler writer;
   private Context stellarContext;
   private transient MessageGetStrategy messageGetStrategy;
+  private transient Cache<CachingStellarProcessor.Key, Object> cache;
   public ParserBolt( String zookeeperUrl
                    , String sensorType
                    , MessageParser<JSONObject> parser
@@ -94,6 +98,9 @@ public class ParserBolt extends ConfiguredParserBolt implements Serializable {
     super.prepare(stormConf, context, collector);
     messageGetStrategy = MessageGetters.DEFAULT_BYTES_FROM_POSITION.get();
     this.collector = collector;
+    if(getSensorParserConfig() != null) {
+      cache = CachingStellarProcessor.createCache(getSensorParserConfig().getCacheConfig());
+    }
     initializeStellar();
     if(getSensorParserConfig() != null && filter == null) {
       getSensorParserConfig().getParserConfig().putIfAbsent("stellarContext", stellarContext);
@@ -119,11 +126,15 @@ public class ParserBolt extends ConfiguredParserBolt implements Serializable {
   }
 
   protected void initializeStellar() {
-    this.stellarContext = new Context.Builder()
+    Context.Builder builder = new Context.Builder()
                                 .with(Context.Capabilities.ZOOKEEPER_CLIENT, () -> client)
                                 .with(Context.Capabilities.GLOBAL_CONFIG, () -> getConfigurations().getGlobalConfig())
                                 .with(Context.Capabilities.STELLAR_CONFIG, () -> getConfigurations().getGlobalConfig())
-                                .build();
+                                ;
+    if(cache != null) {
+      builder = builder.with(Context.Capabilities.CACHE, () -> cache);
+    }
+    this.stellarContext = builder.build();
     StellarFunctions.initialize(stellarContext);
   }
 

http://git-wip-us.apache.org/repos/asf/metron/blob/1c5435cc/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/CachingStellarProcessor.java
----------------------------------------------------------------------
diff --git a/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/CachingStellarProcessor.java b/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/CachingStellarProcessor.java
new file mode 100644
index 0000000..36e6579
--- /dev/null
+++ b/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/common/CachingStellarProcessor.java
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.stellar.common;
+
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+import org.apache.metron.stellar.common.utils.ConversionUtils;
+import org.apache.metron.stellar.dsl.Context;
+import org.apache.metron.stellar.dsl.VariableResolver;
+import org.apache.metron.stellar.dsl.functions.resolver.FunctionResolver;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * The Caching Stellar Processor is a stellar processor that optionally fronts stellar with an expression-by-expression
+ * LFU cache.
+ */
+public class CachingStellarProcessor extends StellarProcessor {
+  private static ThreadLocal<Map<String, Set<String>> > variableCache = ThreadLocal.withInitial(() -> new HashMap<>());
+  public static String MAX_CACHE_SIZE_PARAM = "stellar.cache.maxSize";
+  public static String MAX_TIME_RETAIN_PARAM = "stellar.cache.maxTimeRetain";
+
+  public static class Key {
+    private String expression;
+    private Map<String, Object> input;
+
+    public Key(String expression, Map<String, Object> input) {
+      this.expression = expression;
+      this.input = input;
+    }
+
+    public String getExpression() {
+      return expression;
+    }
+
+    public Map<String, Object> getInput() {
+      return input;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+
+      Key key = (Key) o;
+
+      if (getExpression() != null ? !getExpression().equals(key.getExpression()) : key.getExpression() != null)
+        return false;
+      return getInput() != null ? getInput().equals(key.getInput()) : key.getInput() == null;
+
+    }
+
+    @Override
+    public int hashCode() {
+      int result = getExpression() != null ? getExpression().hashCode() : 0;
+      result = 31 * result + (getInput() != null ? getInput().hashCode() : 0);
+      return result;
+    }
+  }
+
+
+  /**
+   * Parses and evaluates the given Stellar expression, {@code expression}.  Results will be taken from a cache if possible.
+   *
+   * @param expression             The Stellar expression to parse and evaluate.
+   * @param variableResolver The {@link VariableResolver} to determine values of variables used in the Stellar expression, {@code expression}.
+   * @param functionResolver The {@link FunctionResolver} to determine values of functions used in the Stellar expression, {@code expression}.
+   * @param context          The context used during validation.
+   * @return The value of the evaluated Stellar expression, {@code expression}.
+   */
+  @Override
+  public Object parse(String expression, VariableResolver variableResolver, FunctionResolver functionResolver, Context context) {
+    Optional<Object> cacheOpt = context.getCapability(Context.Capabilities.CACHE, false);
+    if(cacheOpt.isPresent()) {
+      Cache<Key, Object> cache = (Cache<Key, Object>) cacheOpt.get();
+      Key k = toKey(expression, variableResolver);
+      return cache.get(k, x -> parseUncached(x.expression, variableResolver, functionResolver, context));
+    }
+    else {
+      return parseUncached(expression, variableResolver, functionResolver, context);
+    }
+  }
+
+  protected Object parseUncached(String expression, VariableResolver variableResolver, FunctionResolver functionResolver, Context context) {
+    return super.parse(expression, variableResolver, functionResolver, context);
+  }
+
+  private Key toKey(String expression, VariableResolver resolver) {
+    Set<String> variablesUsed = variableCache.get().computeIfAbsent(expression, this::variablesUsed);
+    Map<String, Object> input = new HashMap<>();
+    for(String v : variablesUsed) {
+      input.computeIfAbsent(v, resolver::resolve);
+    }
+    return new Key(expression, input);
+  }
+
+  /**
+   * Create a cache given a config.  Note that if the cache size or time to retain is unset or <= 0, then no cache will be returned.
+   * @param config The cache config, keyed by the parameters above; may be null.
+   * @return A cache, or null if the config does not enable caching.
+   */
+  public static Cache<Key, Object> createCache(Map<String, Object> config) {
+    if(config == null) {
+      return null;
+    }
+    Long maxSize = getParam(config, MAX_CACHE_SIZE_PARAM, null, Long.class);
+    Integer maxTimeRetain = getParam(config, MAX_TIME_RETAIN_PARAM, null, Integer.class);
+    if(maxSize == null || maxTimeRetain == null || maxSize <= 0 || maxTimeRetain <= 0) {
+      return null;
+    }
+    return Caffeine.newBuilder()
+                   .maximumSize(maxSize)
+                   .expireAfterWrite(maxTimeRetain, TimeUnit.MINUTES)
+                   .build();
+  }
+
+  private static <T> T getParam(Map<String, Object> config, String key, T defaultVal, Class<T> clazz) {
+    Object o = config.get(key);
+    if(o == null) {
+      return defaultVal;
+    }
+    T ret = ConversionUtils.convert(o, clazz);
+    return ret == null?defaultVal:ret;
+  }
+}
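
Putting the pieces of this class together, a hedged usage sketch (it mirrors the unit test later in this commit; the `name` variable and its value are illustrative):

```
import com.github.benmanes.caffeine.cache.Cache;
import com.google.common.collect.ImmutableMap;
import java.util.HashMap;
import java.util.Map;
import org.apache.metron.stellar.common.CachingStellarProcessor;
import org.apache.metron.stellar.dsl.Context;
import org.apache.metron.stellar.dsl.MapVariableResolver;
import org.apache.metron.stellar.dsl.StellarFunctions;

public class CachingSketch {
  public static void main(String[] args) {
    // Build a small cache and expose it via the CACHE capability.
    Cache<CachingStellarProcessor.Key, Object> cache = CachingStellarProcessor.createCache(
        ImmutableMap.of(CachingStellarProcessor.MAX_CACHE_SIZE_PARAM, 100,
                        CachingStellarProcessor.MAX_TIME_RETAIN_PARAM, 10));
    Context context = new Context.Builder()
                                 .with(Context.Capabilities.CACHE, () -> cache)
                                 .build();

    Map<String, Object> fields = new HashMap<>();
    fields.put("name", "blah");

    // The first call is a cache miss; an identical second call is served
    // from the cache because the expression and the resolved inputs match.
    CachingStellarProcessor processor = new CachingStellarProcessor();
    Object result = processor.parse("TO_UPPER(name)",
                                    new MapVariableResolver(fields),
                                    StellarFunctions.FUNCTION_RESOLVER(),
                                    context);
    System.out.println(result); // BLAH
  }
}
```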

http://git-wip-us.apache.org/repos/asf/metron/blob/1c5435cc/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/dsl/Context.java
----------------------------------------------------------------------
diff --git a/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/dsl/Context.java b/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/dsl/Context.java
index 9568a05..8a477c4 100644
--- a/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/dsl/Context.java
+++ b/metron-stellar/stellar-common/src/main/java/org/apache/metron/stellar/dsl/Context.java
@@ -30,12 +30,43 @@ public class Context implements Serializable {
   
   public enum Capabilities {
       HBASE_PROVIDER
-    , GLOBAL_CONFIG
-    , ZOOKEEPER_CLIENT
-    , SERVICE_DISCOVERER
-    , STELLAR_CONFIG
-    , CONSOLE
-    , SHELL_VARIABLES
+    ,
+    /**
+     * This capability indicates that the global config is available.
+     */
+    GLOBAL_CONFIG
+    ,
+    /**
+     * This capability indicates that a zookeeper client (i.e. a Curator client, specifically) is available.
+     */
+    ZOOKEEPER_CLIENT
+    ,
+    /**
+     * This capability indicates that a MaaS service discoverer is available.
+     */
+    SERVICE_DISCOVERER
+    ,
+    /**
+     * This capability indicates that a map configuring stellar is available.  Generally this is done within the global config
+     * inside of storm, but may be sourced elsewhere (e.g. the CLI when running the REPL).
+     */
+    STELLAR_CONFIG
+    ,
+    /**
+     * This capability indicates that the Console object is available.  This is available when run via the CLI (e.g. from the REPL).
+     */
+    CONSOLE
+    ,
+    /**
+     * This capability indicates that shell variables are available.  This is available when run via the CLI (e.g. from the REPL).
+     */
+    SHELL_VARIABLES
+    ,
+    /**
+     * This capability indicates that the StellarProcessor should use a Caffeine cache to cache expression -> results.  If an expression
+     * is in the cache, then the cached result will be returned instead of recomputing.
+     */
+    CACHE
   }
 
   public enum ActivityType {
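
For reference, a minimal sketch of how a consumer checks for the new CACHE capability; it mirrors the optional lookup in CachingStellarProcessor.parse shown earlier in this commit:

```
import com.github.benmanes.caffeine.cache.Cache;
import java.util.Optional;
import org.apache.metron.stellar.common.CachingStellarProcessor;
import org.apache.metron.stellar.dsl.Context;

public class CapabilitySketch {
  // Returns the registered expression cache, or null when no CACHE
  // capability is present (false suppresses erroring on absence, matching
  // the usage in CachingStellarProcessor.parse).
  @SuppressWarnings("unchecked")
  static Cache<CachingStellarProcessor.Key, Object> cacheFrom(Context context) {
    Optional<Object> cacheOpt = context.getCapability(Context.Capabilities.CACHE, false);
    return (Cache<CachingStellarProcessor.Key, Object>) cacheOpt.orElse(null);
  }
}
```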

http://git-wip-us.apache.org/repos/asf/metron/blob/1c5435cc/metron-stellar/stellar-common/src/test/java/org/apache/metron/stellar/common/CachingStellarProcessorTest.java
----------------------------------------------------------------------
diff --git a/metron-stellar/stellar-common/src/test/java/org/apache/metron/stellar/common/CachingStellarProcessorTest.java b/metron-stellar/stellar-common/src/test/java/org/apache/metron/stellar/common/CachingStellarProcessorTest.java
new file mode 100644
index 0000000..94421de
--- /dev/null
+++ b/metron-stellar/stellar-common/src/test/java/org/apache/metron/stellar/common/CachingStellarProcessorTest.java
@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.stellar.common;
+
+import com.github.benmanes.caffeine.cache.Cache;
+import com.google.common.collect.ImmutableMap;
+import org.apache.metron.stellar.dsl.Context;
+import org.apache.metron.stellar.dsl.MapVariableResolver;
+import org.apache.metron.stellar.dsl.StellarFunctions;
+import org.apache.metron.stellar.dsl.VariableResolver;
+import org.apache.metron.stellar.dsl.functions.resolver.FunctionResolver;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
+public class CachingStellarProcessorTest {
+
+  private static Map<String, Object> fields = new HashMap<String, Object>() {{
+      put("name", "blah");
+    }};
+
+  @Test
+  public void testNoCaching() throws Exception {
+    //no caching, so every expression is a cache miss.
+    Assert.assertEquals(2, countMisses(2, Context.EMPTY_CONTEXT(), "TO_UPPER(name)"));
+    //Ensure the correct result is returned.
+    Assert.assertEquals("BLAH", evaluateExpression(Context.EMPTY_CONTEXT(), "TO_UPPER(name)"));
+  }
+
+  @Test
+  public void testCaching() throws Exception {
+    Cache<CachingStellarProcessor.Key, Object> cache = CachingStellarProcessor.createCache(
+                                                 ImmutableMap.of(CachingStellarProcessor.MAX_CACHE_SIZE_PARAM, 2
+                                                                ,CachingStellarProcessor.MAX_TIME_RETAIN_PARAM, 10
+                                                                )
+                                                                           );
+    Context context = new Context.Builder()
+                                 .with( Context.Capabilities.CACHE , () -> cache )
+                                 .build();
+    //running the same expression twice should hit the cache on the 2nd time and only yield one miss
+    Assert.assertEquals(1, countMisses(2, context, "TO_UPPER(name)"));
+
+    //Ensure the correct result is returned.
+    Assert.assertEquals("BLAH", evaluateExpression(context, "TO_UPPER(name)"));
+
+    //running the same expression 20 more times should pull from the cache
+    Assert.assertEquals(0, countMisses(20, context, "TO_UPPER(name)"));
+
+    //Now we are running 4 distinct operations with a cache size of 2.  The cache has 1 element in it before we start:
+    //  TO_LOWER(name) - miss (brand new), cache is full
+    //  TO_UPPER(name) - hit, cache is full
+    //  TO_UPPER('foo') - miss (brand new), cache is still full, but TO_LOWER is evicted as the least frequently used
+    //  JOIN... - miss (brand new), cache is still full, but TO_UPPER('foo') is evicted as the least frequently used
+    //this pattern repeats a 2nd time to add another 3 cache misses, totalling 6.
+    Assert.assertEquals(6, countMisses(2, context, "TO_LOWER(name)", "TO_UPPER(name)", "TO_UPPER('foo')", "JOIN([name, 'blah'], ',')"));
+  }
+
+  private Object evaluateExpression(Context context, String expression) {
+    StellarProcessor processor = new CachingStellarProcessor();
+    return processor.parse(expression
+                , new MapVariableResolver(fields)
+                , StellarFunctions.FUNCTION_RESOLVER()
+                , context);
+  }
+
+  private int countMisses(int numRepetition, Context context, String... expressions) {
+    AtomicInteger numExpressions = new AtomicInteger(0);
+    StellarProcessor processor = new CachingStellarProcessor() {
+      @Override
+      protected Object parseUncached(String expression, VariableResolver variableResolver, FunctionResolver functionResolver, Context context) {
+        numExpressions.incrementAndGet();
+        return super.parseUncached(expression, variableResolver, functionResolver, context);
+      }
+    };
+
+    for(int i = 0;i < numRepetition;++i) {
+      for(String expression : expressions) {
+        processor.parse(expression
+                , new MapVariableResolver(fields)
+                , StellarFunctions.FUNCTION_RESOLVER()
+                , context);
+      }
+    }
+    return numExpressions.get();
+  }
+}


[21/50] [abbrv] metron git commit: METRON-1462: Separate ES and Kibana from Metron Mpack (mmiklavc via mmiklavc) closes apache/metron#943

Posted by rm...@apache.org.
http://git-wip-us.apache.org/repos/asf/metron/blob/0ab39a32/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/dashboard/dashboard-bulkload.json
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/dashboard/dashboard-bulkload.json b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/dashboard/dashboard-bulkload.json
deleted file mode 100644
index 037f1c6..0000000
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/KIBANA/5.6.2/package/scripts/dashboard/dashboard-bulkload.json
+++ /dev/null
@@ -1,88 +0,0 @@
-{ "create" : { "_id": "all-metron-index", "_type": "index-pattern" } }
-{"title":"*_index_*","timeFieldName":"timestamp","notExpandable":true,"fields":"[{\"name\":\"AA\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"RA\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"RD\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"TC\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"TTLs\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"Z\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"_id\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\"
 :true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_index\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"_score\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":false,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_source\",\"type\":\"_source\",\"count\":0,\"scripted\":false,\"searchable\":false,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"actions\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:geoadapter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:geoadapter:end:ts\",\"type\":\"date\",
 \"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:hostfromjsonlistadapter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:hostfromjsonlistadapter:end:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:threatinteladapter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:threatinteladapter:end:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"addl\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"analyzer\",\"type\":\"string\",\"count\":0,\"scripted\":false,
 \"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"analyzers\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"answers\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"app\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"arg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"assigned_ip\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"auth_attempts\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"auth_success\",\"type\":\"boolean\",\"count\":0,\"scri
 pted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"basic_constraints:ca\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"basic_constraints:path_len\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"bro_timestamp\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"bro_timestamp.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"capture_password\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:curve\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFro
 mDocValues\":true},{\"name\":\"certificate:exponent\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:issuer\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:key_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:key_length\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:key_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:not_valid_after\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:not_valid_be
 fore\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:serial\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:sig_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:subject\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:version\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"cipher\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"cipher_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatab
 le\":true,\"readFromDocValues\":true},{\"name\":\"client\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"command\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"compression_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"conn_state\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"conn_uids\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"connect_info\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"curve\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable
 \":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"cwd\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"data_channel:orig_h\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"data_channel:passive\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"data_channel:resp_h\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"data_channel:resp_p\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"date\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"depth\",\"type\":\"number\",\"cou
 nt\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dgmlen\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dhcp_host_name\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dip\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"direction\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dp\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dropped\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dst\",\"type\":\"ip\",\"count\"
 :0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"duration\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"end-reason\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"end_reason\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"end_reason.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"end_time\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichmentjoinbolt:joiner:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\
 ":\"enrichments:geo:ip_dst_addr:city\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:country\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:dmaCode\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:latitude\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:locID\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:location_point\",\"type\":\"geo_point\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\
 "readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:longitude\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:postalCode\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:city\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:country\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:dmaCode\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:latitude\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchabl
 e\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:locID\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:location_point\",\"type\":\"geo_point\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:longitude\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:postalCode\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichmentsplitterbolt:splitter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichmentsplitterbolt:splitter:end:ts\",\"type\":\"dat
 e\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"error_fields\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"error_hash\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"error_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"established\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ethdst\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ethlen\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ethsrc\"
 ,\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"exception\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"failed_sensor_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"failure_reason\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"file_desc\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"file_mime_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"file_size\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocV
 alues\":true},{\"name\":\"filename\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"first_received\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"from\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"fuid\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"fuids\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"guid\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"guid.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"re
 adFromDocValues\":true},{\"name\":\"helo\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"history\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"host\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"host_key\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"host_key_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"host_p\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"hostname\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true
 ,\"readFromDocValues\":true},{\"name\":\"id\",\"type\":\"conflict\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false,\"conflictDescriptions\":{\"integer\":[\"snort_index_2017.11.06.19\",\"snort_index_2017.11.06.20\",\"snort_index_2017.11.06.21\",\"snort_index_2017.11.06.22\",\"snort_index_2017.11.06.23\",\"snort_index_2017.11.07.00\",\"snort_index_2017.11.07.01\"],\"keyword\":[\"bro_index_2017.11.02.23\",\"bro_index_2017.11.03.00\",\"bro_index_2017.11.03.01\",\"bro_index_2017.11.03.02\",\"bro_index_2017.11.03.03\",\"bro_index_2017.11.03.04\",\"bro_index_2017.11.03.13\",\"bro_index_2017.11.06.19\",\"bro_index_2017.11.06.20\",\"bro_index_2017.11.06.22\",\"bro_index_2017.11.06.23\",\"bro_index_2017.11.07.00\",\"bro_index_2017.11.07.01\"]}},{\"name\":\"iflags\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"in_reply_to\",\"type\":\"string\",\"count\":
 0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ip_dst_addr\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ip_dst_port\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ip_src_addr\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ip_src_port\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"iplen\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"is_alert\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"is_orig\",\"type\":\"boolean
 \",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"is_webmail\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"isn\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"issuer_subject\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"kex_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"last_alert\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"last_reply\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"lease_ti
 me\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"local_orig\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"local_resp\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"mac\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"mac_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"mailfrom\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"md5\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"
 message\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"method\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"mime_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"missed_bytes\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"missing_bytes\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"msg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"msg_id\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true}
 ,{\"name\":\"n\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"name\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"next_protocol\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"note\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"notice\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"oct\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"orig_bytes\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\
 "name\":\"orig_fuids\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"orig_fuids.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"orig_ip_bytes\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"orig_mime_types\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"orig_mime_types.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"orig_pkts\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"original_string\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchab
 le\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"original_string.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"overflow_bytes\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"p\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"parent_fuid\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"passive\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"password\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"path\",\"type\":\"string\",\"count\":0,\"s
 cripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"peer\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"peer_descr\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"pkt\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"port_num\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"proto\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"protocol\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"protocol.keyword\",\"type\":\"string\",\"cou
 nt\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"qclass\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"qclass_name\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"qtype\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"qtype_name\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"query\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"raw_message\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"raw_message_bytes\",\"typ
 e\":\"unknown\",\"count\":0,\"scripted\":false,\"searchable\":false,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"rcode\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"rcode_name\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"rcptto\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"referrer\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"rejected\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"remote_ip\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"reply
 _code\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"reply_msg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"reply_to\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"request_body_len\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"resp_bytes\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"resp_fuids\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"resp_fuids.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"read
 FromDocValues\":true},{\"name\":\"resp_ip_bytes\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"resp_mime_types\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"resp_mime_types.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"resp_pkts\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"response_body_len\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"result\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"resumed\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,
 \"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"riflags\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"risn\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"roct\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"rpkt\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"rtag\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"rtt\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ruflags\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchabl
 e\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"san:dns\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"san:email\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"san:ip\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"san:uri\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"second_received\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"seen_bytes\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"sensor:type\",\"type\":\"string\",\"count\":0,\"scripte
 d\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"sensor:type.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"serial\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"server\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"server_name\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"service\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"sha1\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"sha256\",\"type\":\"string\",\"cou
 nt\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"sig_generator\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"sig_id\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"sig_rev\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"sip\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"software_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"source\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"source:type\",\"type\":\
 "string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"source:type.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"sp\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"src\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"src_peer\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"stack\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"start_time\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"status_code\",\"type
 \":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"status_msg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"sub\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"subject\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"suppress_for\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"tag\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"tcpack\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"tcpflags\",
 \"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"tcpseq\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"tcpwindow\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"threat:triage:level\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"threat:triage:rules:0:score\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"threat:triage:score\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"threatinteljoinbolt:joiner:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,
 \"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"threatintelsplitterbolt:splitter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"threatintelsplitterbolt:splitter:end:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"timedout\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"timestamp\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"tls\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"to\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"tos\",\"type\":\"num
 ber\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"total_bytes\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"trans_depth\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"trans_id\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ttl\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"tunnel_parents\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"uflags\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"uid\",\"t
 ype\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"unparsed_version\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"uri\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"user\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"user_agent\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"username\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"version\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"v
 ersion:addl\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"version:major\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"version:minor\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"version:minor2\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"version:minor3\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"x_originating_ip\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true}]"}
-{ "create" : { "_id": "AV-Sj0e2hKs1cXXnFMqF", "_type": "visualization" } }
-{"title":"Welcome to Apache Metron","visState":"{\"title\":\"Welcome to Apache Metron\",\"type\":\"markdown\",\"params\":{\"type\":\"markdown\",\"markdown\":\"This dashboard enables the validation of Apache Metron and the end-to-end functioning of its default sensor suite.  The default sensor suite includes [\\n                            Snort](https://www.snort.org/), [\\n                            Bro](https://www.bro.org/), and [\\n                            YAF](https://tools.netsa.cert.org/yaf/).  One of Apache Metron's primary goals is to simplify the on-boarding of additional sources of telemetry.  In a production deployment these default sensors should be replaced with ones applicable to the target environment.\\n\\nApache Metron enables disparate sources of telemetry to all be viewed under a 'single pane of glass.'  Telemetry from each of the default sensors can be searched, aggregated, summarized, and viewed within this dashboard. This dashboard should be used as a spri
 ngboard upon which to create your own customized dashboards.\\n\\nThe panels below highlight the volume and variety of events that are currently being consumed by Apache Metron.\"},\"aggs\":[],\"listeners\":{}}","uiStateJSON":"{}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"query\":{\"match_all\":{}},\"filter\":[]}"}}
-{ "index" : { "_id": "5.6.2", "_type": "config" } }
-{"defaultIndex":"AV-S2e81hKs1cXXnFMqN"}
-{ "create" : { "_id": "AV-dVurck7f2nZ-iH3Ka", "_type": "visualization" } }
-{"title":"Event Count By Type","visState":"{\"title\":\"Event Count By Type\",\"type\":\"histogram\",\"params\":{\"grid\":{\"categoryLines\":false,\"style\":{\"color\":\"#eee\"}},\"categoryAxes\":[{\"id\":\"CategoryAxis-1\",\"type\":\"category\",\"position\":\"bottom\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\"},\"labels\":{\"show\":true,\"truncate\":100},\"title\":{\"text\":\"\"}}],\"valueAxes\":[{\"id\":\"ValueAxis-1\",\"name\":\"LeftAxis-1\",\"type\":\"value\",\"position\":\"left\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\",\"mode\":\"normal\",\"defaultYExtents\":false},\"labels\":{\"show\":true,\"rotate\":0,\"filter\":false,\"truncate\":100},\"title\":{\"text\":\"Count\"}}],\"seriesParams\":[{\"show\":\"true\",\"type\":\"histogram\",\"mode\":\"stacked\",\"data\":{\"label\":\"Count\",\"id\":\"1\"},\"valueAxis\":\"ValueAxis-1\",\"drawLinesBetweenPoints\":true,\"showCircles\":true}],\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"
 times\":[],\"addTimeMarker\":false,\"type\":\"histogram\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"source:type\",\"size\":20,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}","uiStateJSON":"{\"vis\":{\"legendOpen\":true,\"colors\":{\"yaf\":\"#CCA300\",\"snort\":\"#C15C17\",\"bro\":\"#F9934E\"}}}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":\"all-metron-index\",\"query\":{\"match_all\":{}},\"filter\":[{\"meta\":{\"index\":\"all-metron-index\",\"type\":\"phrases\",\"key\":\"source:type\",\"value\":\"bro, yaf, snort\",\"params\":[\"bro\",\"yaf\",\"snort\"],\"negate\":false,\"disabled\":false,\"alias\":null},\"query\":{\"bool\":{\"should\":[{\"match_phrase\":{\"source:type\":\"bro\"}},{\"match_phrase\":{\"source:type\":\"yaf\"}},{\"match_phrase\":{\"source:type\":\"snort\"}}],\"minimum_sh
 ould_match\":1}},\"$state\":{\"store\":\"appState\"}}]}"}}
-{ "create" : { "_id": "AV-YyJw3PfR7HJex-ZdY", "_type": "visualization" } }
-{"title":"All index TS event count","visState":"{\"title\":\"All index TS event count\",\"type\":\"metrics\",\"params\":{\"id\":\"eac7cbe0-c411-11e7-a0b9-2137696bd057\",\"type\":\"metric\",\"series\":[{\"id\":\"eac7cbe1-c411-11e7-a0b9-2137696bd057\",\"color\":\"#68BC00\",\"split_mode\":\"everything\",\"metrics\":[{\"id\":\"eac7cbe2-c411-11e7-a0b9-2137696bd057\",\"type\":\"count\"}],\"seperate_axis\":0,\"axis_position\":\"right\",\"formatter\":\"number\",\"chart_type\":\"line\",\"line_width\":1,\"point_size\":1,\"fill\":0.5,\"stacked\":\"none\",\"label\":\"Event Count\",\"split_filters\":[{\"color\":\"#68BC00\",\"id\":\"89be23f0-c4af-11e7-ac01-25d5c1ff2e49\"}],\"series_drop_last_bucket\":0}],\"time_field\":\"timestamp\",\"index_pattern\":\"bro_index*,snort_index*,yaf_index*\",\"interval\":\"1y\",\"axis_position\":\"left\",\"axis_formatter\":\"number\",\"show_legend\":1,\"background_color_rules\":[{\"id\":\"022dc960-c412-11e7-a0b9-2137696bd057\"}],\"bar_color_rules\":[{\"id\":\"21ffb0
 f0-c412-11e7-a0b9-2137696bd057\"}],\"filter\":\"\",\"drop_last_bucket\":0},\"aggs\":[],\"listeners\":{}}","uiStateJSON":"{}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"query\":{\"match_all\":{}},\"filter\":[]}"}}
-{ "create" : { "_id": "AV-cBm5JFLIoshSSHghu", "_type": "visualization" } }
-{"title":"All index TS Chart","visState":"{\"title\":\"All index TS Chart\",\"type\":\"metrics\",\"params\":{\"id\":\"eac7cbe0-c411-11e7-a0b9-2137696bd057\",\"type\":\"timeseries\",\"series\":[{\"id\":\"eac7cbe1-c411-11e7-a0b9-2137696bd057\",\"color\":\"rgba(0,156,224,1)\",\"split_mode\":\"terms\",\"metrics\":[{\"id\":\"eac7cbe2-c411-11e7-a0b9-2137696bd057\",\"type\":\"count\"}],\"seperate_axis\":0,\"axis_position\":\"right\",\"formatter\":\"number\",\"chart_type\":\"bar\",\"line_width\":\"1\",\"point_size\":1,\"fill\":0.5,\"stacked\":\"stacked\",\"label\":\"Events\",\"terms_field\":\"source:type\",\"value_template\":\"{{value}}\"}],\"time_field\":\"timestamp\",\"index_pattern\":\"bro*,snort*,yaf*\",\"interval\":\"30s\",\"axis_position\":\"left\",\"axis_formatter\":\"number\",\"show_legend\":1,\"background_color_rules\":[{\"id\":\"022dc960-c412-11e7-a0b9-2137696bd057\"}],\"bar_color_rules\":[{\"id\":\"21ffb0f0-c412-11e7-a0b9-2137696bd057\"}],\"show_grid\":1,\"drop_last_bucket\":0},\
 "aggs\":[],\"listeners\":{}}","uiStateJSON":"{}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"query\":{\"match_all\":{}},\"filter\":[]}"}}
-{ "create" : { "_id": "AV-dXz9Lk7f2nZ-iH3Kb", "_type": "visualization" } }
-{"title":"Event Count Pie Chart","visState":"{\"title\":\"Event Count Pie Chart\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":false,\"type\":\"pie\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"Events by Source Type\"}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"source:type\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}","uiStateJSON":"{}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":\"all-metron-index\",\"query\":{\"match_all\":{}},\"filter\":[{\"meta\":{\"index\":\"all-metron-index\",\"type\":\"phrases\",\"key\":\"source:type\",\"value\":\"bro, snort, yaf\",\"params\":[\"bro\",\"snort\",\"yaf\"],\"negate\":false,\"disabled\":false,\"alias\":null},\"query\":{\"bool\":{\"should\":[{\"match_phrase\":{\"source:type\":\"bro\"}},{\"match_ph
 rase\":{\"source:type\":\"snort\"}},{\"match_phrase\":{\"source:type\":\"yaf\"}}],\"minimum_should_match\":1}},\"$state\":{\"store\":\"appState\"}}]}"}}
-{ "create" : { "_id": "AV-ddhh7k7f2nZ-iH3Kx", "_type": "visualization" } }
-{"title":"Flow Location Map","visState":"{\"title\":\"Flow Location Map\",\"type\":\"tile_map\",\"params\":{\"mapType\":\"Scaled Circle Markers\",\"isDesaturated\":true,\"addTooltip\":true,\"heatMaxZoom\":0,\"heatMinOpacity\":0.1,\"heatRadius\":25,\"heatBlur\":15,\"legendPosition\":\"bottomright\",\"mapZoom\":2,\"mapCenter\":[0,0],\"wms\":{\"enabled\":false,\"url\":\"https://basemap.nationalmap.gov/arcgis/services/USGSTopo/MapServer/WMSServer\",\"options\":{\"version\":\"1.3.0\",\"layers\":\"0\",\"format\":\"image/png\",\"transparent\":true,\"attribution\":\"Maps provided by USGS\",\"styles\":\"\"}},\"type\":\"tile_map\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"geohash_grid\",\"schema\":\"segment\",\"params\":{\"field\":\"enrichments:geo:ip_src_addr:location_point\",\"autoPrecision\":true,\"useGeocentroid\":true,\"precision\":2,\"customLabel\":\"Flow Source Locations\"}}],\"listeners\"
 :{}}","uiStateJSON":"{}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":\"all-metron-index\",\"query\":{\"match_all\":{}},\"filter\":[{\"meta\":{\"index\":\"all-metron-index\",\"type\":\"phrases\",\"key\":\"source:type\",\"value\":\"bro, snort, yaf\",\"params\":[\"bro\",\"snort\",\"yaf\"],\"negate\":false,\"disabled\":false,\"alias\":null},\"query\":{\"bool\":{\"should\":[{\"match_phrase\":{\"source:type\":\"bro\"}},{\"match_phrase\":{\"source:type\":\"snort\"}},{\"match_phrase\":{\"source:type\":\"yaf\"}}],\"minimum_should_match\":1}},\"$state\":{\"store\":\"appState\"}}]}"}}
-{ "create" : { "_id": "AV-dfk_gk7f2nZ-iH3K0", "_type": "visualization" } }
-{"title":"Events By Country","visState":"{\"title\":\"Events By Country\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":false,\"type\":\"pie\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"enrichments:geo:ip_src_addr:country\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}","uiStateJSON":"{\"spy\":{\"mode\":{\"name\":null,\"fill\":false}}}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":\"all-metron-index\",\"query\":{\"match_all\":{}},\"filter\":[{\"meta\":{\"index\":\"all-metron-index\",\"type\":\"phrases\",\"key\":\"source:type\",\"value\":\"bro, snort, yaf\",\"params\":[\"bro\",\"snort\",\"yaf\"],\"negate\":false,\"disabled\":false,\"alias\":null},\"query\":{\"bool\":{\"should\":[{\"match_phrase\":{\"source:type
 \":\"bro\"}},{\"match_phrase\":{\"source:type\":\"snort\"}},{\"match_phrase\":{\"source:type\":\"yaf\"}}],\"minimum_should_match\":1}},\"$state\":{\"store\":\"appState\"}}]}"}}
-{ "create" : { "_id": "AV-TUPlDgto7-W6O2b3n", "_type": "index-pattern" } }
-{"title":"yaf_index*","timeFieldName":"timestamp","notExpandable":true,"fields":"[{\"name\":\"_id\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_index\",\"type\":\"string\",\"count\":1,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"_score\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":false,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_source\",\"type\":\"_source\",\"count\":0,\"scripted\":false,\"searchable\":false,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"adapter:geoadapter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:geoadapter:end:t
 s\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:hostfromjsonlistadapter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:hostfromjsonlistadapter:end:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:threatinteladapter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:threatinteladapter:end:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"app\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dip\",\"type\":\"string\",\"count\":0,\"s
 cripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dp\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"duration\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"end-reason\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"end_reason\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"end_reason.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"end_time\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichmentjoinbolt:joiner:
 ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:city\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:country\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:dmaCode\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:latitude\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:locID\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichment
 s:geo:ip_dst_addr:location_point\",\"type\":\"geo_point\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:longitude\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:postalCode\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:city\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:country\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:dmaCode\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFro
 mDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:latitude\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:locID\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:location_point\",\"type\":\"geo_point\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:longitude\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_src_addr:postalCode\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichmentsplitterbolt:splitter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"sea
 rchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichmentsplitterbolt:splitter:end:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"guid\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"guid.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"iflags\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ip_dst_addr\",\"type\":\"ip\",\"count\":1,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ip_dst_port\",\"type\":\"number\",\"count\":1,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ip_src_addr\",\"type\":\"i
 p\",\"count\":1,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ip_src_port\",\"type\":\"number\",\"count\":1,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"isn\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"oct\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"original_string\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"original_string.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"pkt\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"prot
 o\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"protocol\",\"type\":\"string\",\"count\":1,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"protocol.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"riflags\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"risn\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"roct\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"rpkt\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":
 \"rtag\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"rtt\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"ruflags\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"sip\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"source:type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"sp\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"start_time\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"tag\"
 ,\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"threatinteljoinbolt:joiner:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"threatintelsplitterbolt:splitter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"threatintelsplitterbolt:splitter:end:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"timestamp\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"uflags\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true}]"}
-{ "create" : { "_id": "AV-eebabk7f2nZ-iH3L1", "_type": "visualization" } }
-{"title":"YAF Flow Duration","visState":"{\"title\":\"YAF Flow Duration\",\"type\":\"area\",\"params\":{\"grid\":{\"categoryLines\":false,\"style\":{\"color\":\"#eee\"}},\"categoryAxes\":[{\"id\":\"CategoryAxis-1\",\"type\":\"category\",\"position\":\"bottom\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\"},\"labels\":{\"show\":true,\"truncate\":100},\"title\":{\"text\":\"Flow Duration (seconds)\"}}],\"valueAxes\":[{\"id\":\"ValueAxis-1\",\"name\":\"LeftAxis-1\",\"type\":\"value\",\"position\":\"left\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\",\"mode\":\"normal\"},\"labels\":{\"show\":true,\"rotate\":0,\"filter\":false,\"truncate\":100},\"title\":{\"text\":\"Count\"}}],\"seriesParams\":[{\"show\":\"true\",\"type\":\"area\",\"mode\":\"stacked\",\"data\":{\"label\":\"Count\",\"id\":\"1\"},\"drawLinesBetweenPoints\":true,\"showCircles\":true,\"interpolate\":\"linear\",\"valueAxis\":\"ValueAxis-1\"}],\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"
 right\",\"times\":[],\"addTimeMarker\":false,\"type\":\"area\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"duration\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Flow Duration (seconds)\"}}],\"listeners\":{}}","uiStateJSON":"{\"vis\":{\"legendOpen\":false}}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":\"AV-TUPlDgto7-W6O2b3n\",\"query\":{\"match_all\":{}},\"filter\":[]}"}}
-{ "create" : { "_id": "AV-deDqXk7f2nZ-iH3Ky", "_type": "visualization" } }
-{"title":"Geo-IP Locations","visState":"{\"title\":\"Geo-IP Locations\",\"type\":\"metric\",\"params\":{\"addTooltip\":true,\"addLegend\":false,\"type\":\"gauge\",\"gauge\":{\"verticalSplit\":false,\"autoExtend\":false,\"percentageMode\":false,\"gaugeType\":\"Metric\",\"gaugeStyle\":\"Full\",\"backStyle\":\"Full\",\"orientation\":\"vertical\",\"colorSchema\":\"Green to Red\",\"gaugeColorMode\":\"None\",\"useRange\":false,\"colorsRange\":[{\"from\":0,\"to\":100}],\"invertColors\":false,\"labels\":{\"show\":false,\"color\":\"black\"},\"scale\":{\"show\":false,\"labels\":false,\"color\":\"#333\",\"width\":2},\"type\":\"simple\",\"style\":{\"fontSize\":\"60\",\"bgColor\":false,\"labelColor\":false,\"subText\":\"\"}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"cardinality\",\"schema\":\"metric\",\"params\":{\"field\":\"enrichments:geo:ip_src_addr:country\",\"customLabel\":\"Unique Location(s)\"}}],\"listeners\":{}}","uiStateJSON":"{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0
 ,104,55)\"}}}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":\"all-metron-index\",\"query\":{\"match_all\":{}},\"filter\":[{\"meta\":{\"index\":\"all-metron-index\",\"type\":\"phrases\",\"key\":\"source:type\",\"value\":\"bro, snort, yaf\",\"params\":[\"bro\",\"snort\",\"yaf\"],\"negate\":false,\"disabled\":false,\"alias\":null},\"query\":{\"bool\":{\"should\":[{\"match_phrase\":{\"source:type\":\"bro\"}},{\"match_phrase\":{\"source:type\":\"snort\"}},{\"match_phrase\":{\"source:type\":\"yaf\"}}],\"minimum_should_match\":1}},\"$state\":{\"store\":\"appState\"}}]}"}}
-{ "create" : { "_id": "AV-YvG0DPfR7HJex-ZaS", "_type": "visualization" } }
-{"title":"Event Count","visState":"{\"title\":\"Event Count\",\"type\":\"metric\",\"params\":{\"addLegend\":false,\"addTooltip\":true,\"gauge\":{\"autoExtend\":false,\"backStyle\":\"Full\",\"colorSchema\":\"Green to Red\",\"colorsRange\":[{\"from\":0,\"to\":100}],\"gaugeColorMode\":\"None\",\"gaugeStyle\":\"Full\",\"gaugeType\":\"Metric\",\"invertColors\":false,\"labels\":{\"color\":\"black\",\"show\":false},\"orientation\":\"vertical\",\"percentageMode\":false,\"scale\":{\"color\":\"#333\",\"labels\":false,\"show\":false,\"width\":2},\"style\":{\"bgColor\":false,\"fontSize\":\"60\",\"labelColor\":false,\"subText\":\"\",\"bgFill\":\"\"},\"type\":\"simple\",\"useRange\":false,\"verticalSplit\":false},\"type\":\"gauge\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"Event Count\"}}],\"listeners\":{}}","uiStateJSON":"{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}","description":"","version":1,"kibanaSavedO
 bjectMeta":{"searchSourceJSON":"{\"index\":\"all-metron-index\",\"query\":{\"match_all\":{}},\"filter\":[{\"meta\":{\"index\":\"all-metron-index\",\"type\":\"phrases\",\"key\":\"source:type\",\"value\":\"bro, snort, yaf\",\"params\":[\"bro\",\"snort\",\"yaf\"],\"negate\":false,\"disabled\":false,\"alias\":null},\"query\":{\"bool\":{\"should\":[{\"match_phrase\":{\"source:type\":\"bro\"}},{\"match_phrase\":{\"source:type\":\"snort\"}},{\"match_phrase\":{\"source:type\":\"yaf\"}}],\"minimum_should_match\":1}},\"$state\":{\"store\":\"appState\"}}]}"}}
-{ "create" : { "_id": "AV-ejKEdk7f2nZ-iH3MI", "_type": "visualization" } }
-{"title":"Web Requests","visState":"{\"title\":\"Web Requests\",\"type\":\"metric\",\"params\":{\"addTooltip\":true,\"addLegend\":false,\"type\":\"gauge\",\"gauge\":{\"verticalSplit\":false,\"autoExtend\":false,\"percentageMode\":false,\"gaugeType\":\"Metric\",\"gaugeStyle\":\"Full\",\"backStyle\":\"Full\",\"orientation\":\"vertical\",\"colorSchema\":\"Green to Red\",\"gaugeColorMode\":\"None\",\"useRange\":false,\"colorsRange\":[{\"from\":0,\"to\":100}],\"invertColors\":false,\"labels\":{\"show\":false,\"color\":\"black\"},\"scale\":{\"show\":false,\"labels\":false,\"color\":\"#333\",\"width\":2},\"type\":\"simple\",\"style\":{\"fontSize\":60,\"bgColor\":false,\"labelColor\":false,\"subText\":\"\"}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}}],\"listeners\":{}}","uiStateJSON":"{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":\"AV-S
 2e81hKs1cXXnFMqN\",\"query\":{\"match_all\":{}},\"filter\":[{\"meta\":{\"index\":\"AV-S2e81hKs1cXXnFMqN\",\"type\":\"phrases\",\"key\":\"protocol\",\"value\":\"http, https\",\"params\":[\"http\",\"https\"],\"negate\":false,\"disabled\":false,\"alias\":null},\"query\":{\"bool\":{\"should\":[{\"match_phrase\":{\"protocol\":\"http\"}},{\"match_phrase\":{\"protocol\":\"https\"}}],\"minimum_should_match\":1}},\"$state\":{\"store\":\"appState\"}}]}"}}
-{ "create" : { "_id": "AV-ejbG6k7f2nZ-iH3MJ", "_type": "visualization" } }
-{"title":"DNS Requests","visState":"{\"title\":\"DNS Requests\",\"type\":\"metric\",\"params\":{\"addTooltip\":true,\"addLegend\":false,\"type\":\"gauge\",\"gauge\":{\"verticalSplit\":false,\"autoExtend\":false,\"percentageMode\":false,\"gaugeType\":\"Metric\",\"gaugeStyle\":\"Full\",\"backStyle\":\"Full\",\"orientation\":\"vertical\",\"colorSchema\":\"Green to Red\",\"gaugeColorMode\":\"None\",\"useRange\":false,\"colorsRange\":[{\"from\":0,\"to\":100}],\"invertColors\":false,\"labels\":{\"show\":false,\"color\":\"black\"},\"scale\":{\"show\":false,\"labels\":false,\"color\":\"#333\",\"width\":2},\"type\":\"simple\",\"style\":{\"fontSize\":60,\"bgColor\":false,\"labelColor\":false,\"subText\":\"\"}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}}],\"listeners\":{}}","uiStateJSON":"{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":\"AV-S
 2e81hKs1cXXnFMqN\",\"query\":{\"match_all\":{}},\"filter\":[{\"meta\":{\"index\":\"AV-S2e81hKs1cXXnFMqN\",\"negate\":false,\"disabled\":false,\"alias\":null,\"type\":\"phrase\",\"key\":\"protocol\",\"value\":\"dns\"},\"query\":{\"match\":{\"protocol\":{\"query\":\"dns\",\"type\":\"phrase\"}}},\"$state\":{\"store\":\"appState\"}}]}"}}
-{ "create" : { "_id": "AV-eh5Wgk7f2nZ-iH3MG", "_type": "visualization" } }
-{"title":"Snort Alert Types","visState":"{\"title\":\"Snort Alert Types\",\"type\":\"metric\",\"params\":{\"addTooltip\":true,\"addLegend\":false,\"type\":\"gauge\",\"gauge\":{\"verticalSplit\":false,\"autoExtend\":false,\"percentageMode\":false,\"gaugeType\":\"Metric\",\"gaugeStyle\":\"Full\",\"backStyle\":\"Full\",\"orientation\":\"vertical\",\"colorSchema\":\"Green to Red\",\"gaugeColorMode\":\"None\",\"useRange\":false,\"colorsRange\":[{\"from\":0,\"to\":100}],\"invertColors\":false,\"labels\":{\"show\":false,\"color\":\"black\"},\"scale\":{\"show\":false,\"labels\":false,\"color\":\"#333\",\"width\":2},\"type\":\"simple\",\"style\":{\"fontSize\":60,\"bgColor\":false,\"labelColor\":false,\"subText\":\"\"}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"cardinality\",\"schema\":\"metric\",\"params\":{\"field\":\"sig_id\",\"customLabel\":\"Alert Type(s)\"}}],\"listeners\":{}}","uiStateJSON":"{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}","description":"","ver
 sion":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":\"AV-TAoyPhKs1cXXnFMqi\",\"query\":{\"match_all\":{}},\"filter\":[]}"}}
-{ "create" : { "_id": "AV-ecrFkk7f2nZ-iH3L0", "_type": "visualization" } }
-{"title":"Yaf Flows Count","visState":"{\"title\":\"Yaf Flows Count\",\"type\":\"metric\",\"params\":{\"addTooltip\":true,\"addLegend\":false,\"type\":\"gauge\",\"gauge\":{\"verticalSplit\":false,\"autoExtend\":false,\"percentageMode\":false,\"gaugeType\":\"Metric\",\"gaugeStyle\":\"Full\",\"backStyle\":\"Full\",\"orientation\":\"vertical\",\"colorSchema\":\"Green to Red\",\"gaugeColorMode\":\"None\",\"useRange\":false,\"colorsRange\":[{\"from\":0,\"to\":100}],\"invertColors\":false,\"labels\":{\"show\":false,\"color\":\"black\"},\"scale\":{\"show\":false,\"labels\":false,\"color\":\"#333\",\"width\":2},\"type\":\"simple\",\"style\":{\"fontSize\":60,\"bgColor\":false,\"labelColor\":false,\"subText\":\"\"}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}}],\"listeners\":{}}","uiStateJSON":"{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":
 \"AV-TUPlDgto7-W6O2b3n\",\"query\":{\"match_all\":{}},\"filter\":[]}"}}
-{ "create" : { "_id": "AV-ek_Jnk7f2nZ-iH3MK", "_type": "visualization" } }
-{"title":"Web Request Type","visState":"{\"title\":\"Web Request Type\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":false,\"type\":\"pie\"},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"method\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}","uiStateJSON":"{}","description":"","version":1,"kibanaSavedObjectMeta":{"searchSourceJSON":"{\"index\":\"AV-S2e81hKs1cXXnFMqN\",\"query\":{\"match_all\":{}},\"filter\":[{\"meta\":{\"index\":\"AV-S2e81hKs1cXXnFMqN\",\"type\":\"phrases\",\"key\":\"protocol\",\"value\":\"http, https\",\"params\":[\"http\",\"https\"],\"negate\":false,\"disabled\":false,\"alias\":null},\"query\":{\"bool\":{\"should\":[{\"match_phrase\":{\"protocol\":\"http\"}},{\"match_phrase\":{\"protocol\":\"https\"}}],\"minimum_should_match\":1}},\
 "$state\":{\"store\":\"appState\"}}]}"}}
-{ "create" : { "_id": "AV-S2e81hKs1cXXnFMqN", "_type": "index-pattern" } }
-{"title":"bro_index*","timeFieldName":"timestamp","notExpandable":true,"fields":"[{\"name\":\"AA\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"RA\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"RD\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"TC\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"TTLs\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"Z\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"_id\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\
 ":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_index\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"_score\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":false,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_source\",\"type\":\"_source\",\"count\":0,\"scripted\":false,\"searchable\":false,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"actions\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:geoadapter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:geoadapter:end:ts\",\"type\":\"date\"
 ,\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:hostfromjsonlistadapter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:hostfromjsonlistadapter:end:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:threatinteladapter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"adapter:threatinteladapter:end:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"addl\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"analyzer\",\"type\":\"string\",\"count\":0,\"scripted\":false
 ,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"analyzers\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"answers\",\"type\":\"string\",\"count\":1,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"arg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"assigned_ip\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"auth_attempts\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"auth_success\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"basic_constraints:ca\",\"type\":\"boolean\"
 ,\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"basic_constraints:path_len\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"bro_timestamp\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"bro_timestamp.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"capture_password\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:curve\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:exponent\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatabl
 e\":true,\"readFromDocValues\":true},{\"name\":\"certificate:issuer\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:key_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:key_length\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:key_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:not_valid_after\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:not_valid_before\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":
 \"certificate:serial\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:sig_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:subject\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"certificate:version\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"cipher\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"cipher_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"client\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggreg
 atable\":true,\"readFromDocValues\":true},{\"name\":\"command\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"compression_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"conn_state\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"conn_uids\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"connect_info\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"curve\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"cwd\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchabl
 e\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"data_channel:orig_h\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"data_channel:passive\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"data_channel:resp_h\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"data_channel:resp_p\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"date\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"depth\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dhcp_host_name\",\"type\":\"st
 ring\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"direction\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dropped\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"dst\",\"type\":\"ip\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"duration\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichmentjoinbolt:joiner:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:city\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues
 \":true},{\"name\":\"enrichments:geo:ip_dst_addr:country\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:dmaCode\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:latitude\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:locID\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:location_point\",\"type\":\"geo_point\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:longitude\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\
 "aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichments:geo:ip_dst_addr:postalCode\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichmentsplitterbolt:splitter:begin:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"enrichmentsplitterbolt:splitter:end:ts\",\"type\":\"date\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"established\",\"type\":\"boolean\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"failure_reason\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"file_desc\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDoc
 Values\":true},{\"name\":\"file_mime_type\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"file_size\",\"type\":\"number\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"filename\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"first_received\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"from\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"fuid\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"fuids\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":tru
 e,\"readFromDocValues\":true},{\"name\":\"guid\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":false,\"readFromDocValues\":false},{\"name\":\"guid.keyword\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"helo\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":false},{\"name\":\"history\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"host\",\"type\":\"string\",\"count\":2,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"host_key\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregatable\":true,\"readFromDocValues\":true},{\"name\":\"host_key_alg\",\"type\":\"string\",\"count\":0,\"scripted\":false,\"searchable\":true,\"aggregata
 ble\":true,\"readFromDocValues\":true},{\"name\":\"host_p\",\"

<TRUNCATED>

[13/50] [abbrv] metron git commit: METRON-590 Enable Use of Event Time in Profiler (nickwallen) closes apache/metron#965

Posted by rm...@apache.org.
METRON-590 Enable Use of Event Time in Profiler (nickwallen) closes apache/metron#965


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/3083b471
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/3083b471
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/3083b471

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 3083b471fe912bc74d27017834e6c80ff177680e
Parents: 46ad9d9
Author: nickwallen <ni...@nickallen.org>
Authored: Tue Mar 20 16:00:20 2018 -0400
Committer: nickallen <ni...@apache.org>
Committed: Tue Mar 20 16:00:20 2018 -0400

----------------------------------------------------------------------
 .../client/stellar/ProfilerFunctions.java       |  14 +-
 .../profiler/DefaultMessageDistributor.java     | 207 +++++++-
 .../metron/profiler/DefaultProfileBuilder.java  | 110 ++--
 .../metron/profiler/MessageDistributor.java     |  48 +-
 .../apache/metron/profiler/MessageRoute.java    |  19 +-
 .../apache/metron/profiler/MessageRouter.java   |  11 +-
 .../apache/metron/profiler/ProfileBuilder.java  |  34 +-
 .../metron/profiler/ProfileMeasurement.java     |   6 +-
 .../metron/profiler/StandAloneProfiler.java     | 100 +++-
 .../org/apache/metron/profiler/clock/Clock.java |  18 +-
 .../metron/profiler/clock/ClockFactory.java     |  38 ++
 .../profiler/clock/DefaultClockFactory.java     |  57 ++
 .../metron/profiler/clock/EventTimeClock.java   |  72 +++
 .../metron/profiler/clock/FixedClock.java       |  39 +-
 .../profiler/clock/FixedClockFactory.java       |  44 ++
 .../apache/metron/profiler/clock/WallClock.java |  17 +-
 .../profiler/DefaultMessageDistributorTest.java | 171 +++++-
 .../profiler/DefaultProfileBuilderTest.java     | 119 +++--
 .../metron/profiler/ProfilePeriodTest.java      |   1 -
 .../metron/profiler/StandAloneProfilerTest.java | 255 +++++++++
 .../profiler/clock/DefaultClockFactoryTest.java |  75 +++
 .../profiler/clock/EventTimeClockTest.java      | 115 +++++
 .../metron/profiler/clock/WallClockTest.java    |  54 ++
 metron-analytics/metron-profiler/README.md      |  98 +++-
 .../src/main/config/profiler.properties         |  14 +-
 .../src/main/flux/profiler/remote.yaml          |  42 +-
 .../profiler/bolt/DestinationHandler.java       |  56 --
 .../bolt/FixedFrequencyFlushSignal.java         | 126 +++++
 .../metron/profiler/bolt/FlushSignal.java       |  51 ++
 .../profiler/bolt/HBaseDestinationHandler.java  |  58 ---
 .../metron/profiler/bolt/HBaseEmitter.java      |  63 +++
 .../profiler/bolt/KafkaDestinationHandler.java  | 110 ----
 .../metron/profiler/bolt/KafkaEmitter.java      | 114 ++++
 .../metron/profiler/bolt/ManualFlushSignal.java |  54 ++
 .../profiler/bolt/ProfileBuilderBolt.java       | 374 +++++++++++---
 .../bolt/ProfileMeasurementEmitter.java         |  59 +++
 .../profiler/bolt/ProfileSplitterBolt.java      | 132 ++++-
 .../zookeeper/event-time-test/profiler.json     |  12 +
 .../bolt/FixedFrequencyFlushSignalTest.java     |  71 +++
 .../bolt/KafkaDestinationHandlerTest.java       | 203 --------
 .../metron/profiler/bolt/KafkaEmitterTest.java  | 208 ++++++++
 .../profiler/bolt/ProfileBuilderBoltTest.java   | 516 +++++++++++--------
 .../profiler/bolt/ProfileHBaseMapperTest.java   |   6 +-
 .../profiler/bolt/ProfileSplitterBoltTest.java  | 288 +++++++++--
 .../profiler/integration/MessageBuilder.java    |  75 +++
 .../integration/ProfilerIntegrationTest.java    | 235 ++++++---
 .../configuration/metron-profiler-env.xml       |  77 ++-
 .../package/scripts/params/params_linux.py      |   7 +
 .../package/templates/profiler.properties.j2    |  15 +-
 .../METRON/CURRENT/themes/metron_theme.json     | 118 ++++-
 .../configuration/profiler/ProfileConfig.java   |  53 ++
 .../configuration/profiler/ProfilerConfig.java  |  48 +-
 .../apache/metron/common/utils/JSONUtils.java   |  11 +-
 .../configurations/ProfilerUpdater.java         |   1 +
 .../profiler/ProfileConfigTest.java             |   5 +-
 .../profiler/ProfilerConfigTest.java            | 120 +++++
 .../integration/components/KafkaComponent.java  |  39 +-
 57 files changed, 3987 insertions(+), 1096 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-client/src/main/java/org/apache/metron/profiler/client/stellar/ProfilerFunctions.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-client/src/main/java/org/apache/metron/profiler/client/stellar/ProfilerFunctions.java b/metron-analytics/metron-profiler-client/src/main/java/org/apache/metron/profiler/client/stellar/ProfilerFunctions.java
index 64c1e2e..d6afe1d 100644
--- a/metron-analytics/metron-profiler-client/src/main/java/org/apache/metron/profiler/client/stellar/ProfilerFunctions.java
+++ b/metron-analytics/metron-profiler-client/src/main/java/org/apache/metron/profiler/client/stellar/ProfilerFunctions.java
@@ -101,7 +101,10 @@ public class ProfilerFunctions {
         throw new IllegalArgumentException("Invalid profiler configuration", e);
       }
 
-      return new StandAloneProfiler(profilerConfig, periodDurationMillis, context);
+      // the TTL and max routes do not matter here
+      long profileTimeToLiveMillis = Long.MAX_VALUE;
+      long maxNumberOfRoutes = Long.MAX_VALUE;
+      return new StandAloneProfiler(profilerConfig, periodDurationMillis, profileTimeToLiveMillis, maxNumberOfRoutes, context);
     }
   }
 
@@ -138,13 +141,8 @@ public class ProfilerFunctions {
 
       // user must provide the stand alone profiler
       StandAloneProfiler profiler = Util.getArg(1, StandAloneProfiler.class, args);
-      try {
-        for (JSONObject message : messages) {
-          profiler.apply(message);
-        }
-
-      } catch (ExecutionException e) {
-        throw new IllegalArgumentException(format("Failed to apply message; error=%s", e.getMessage()), e);
+      for (JSONObject message : messages) {
+        profiler.apply(message);
       }
 
       return profiler;
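
For illustration only, a minimal sketch (not part of this commit) of driving the reworked stand-alone profiler directly. The config, context, and messages are assumed to come from the caller; the unbounded TTL and route limit mirror the values chosen in the diff above.

    import java.util.List;
    import org.apache.metron.common.configuration.profiler.ProfilerConfig;
    import org.apache.metron.profiler.ProfileMeasurement;
    import org.apache.metron.profiler.StandAloneProfiler;
    import org.apache.metron.stellar.dsl.Context;
    import org.json.simple.JSONObject;

    public class StandAloneProfilerSketch {
      public static List<ProfileMeasurement> run(ProfilerConfig config,
                                                 long periodMillis,
                                                 Context context,
                                                 Iterable<JSONObject> messages) {
        // TTL and max routes are effectively unbounded, as in the diff above
        StandAloneProfiler profiler = new StandAloneProfiler(
                config, periodMillis, Long.MAX_VALUE, Long.MAX_VALUE, context);
        for (JSONObject message : messages) {
          profiler.apply(message);  // no longer declares ExecutionException
        }
        return profiler.flush();
      }
    }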

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/DefaultMessageDistributor.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/DefaultMessageDistributor.java b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/DefaultMessageDistributor.java
index 53377a0..ea5126f 100644
--- a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/DefaultMessageDistributor.java
+++ b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/DefaultMessageDistributor.java
@@ -20,14 +20,20 @@
 
 package org.apache.metron.profiler;
 
+import com.google.common.base.Ticker;
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.RemovalListener;
+import com.google.common.cache.RemovalNotification;
 import org.apache.metron.common.configuration.profiler.ProfileConfig;
-import org.apache.metron.profiler.clock.WallClock;
 import org.apache.metron.stellar.dsl.Context;
 import org.json.simple.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
 import java.util.Optional;
 import java.util.concurrent.ExecutionException;
@@ -36,32 +42,81 @@ import java.util.concurrent.TimeUnit;
 import static java.lang.String.format;
 
 /**
- * Distributes a message along a MessageRoute.  A MessageRoute will lead to one or
- * more ProfileBuilders.
+ * The default implementation of a {@link MessageDistributor}.
+ *
+ * <p>Two caches are maintained; one for active profiles and another for expired
+ * profiles.  A profile will remain on the active cache as long as it continues
+ * to receive messages.
+ *
+ * <p>If a profile has not received messages for an extended period of time, it
+ * is expired and moved to the expired cache.  A profile that is expired can no
+ * longer receive new messages.
+ *
+ * <p>A profile is stored in the expired cache for a fixed period of time so that
+ * a client can flush the state of expired profiles.  If the client does not flush
+ * the expired profiles using `flushExpired`, the state of these profiles will be
+ * lost.
  *
- * A ProfileBuilder is responsible for maintaining the state of a single profile,
- * for a single entity.  There will be one ProfileBuilder for each (profile, entity) pair.
- * This class ensures that each ProfileBuilder receives the telemetry messages that
- * it needs.
  */
 public class DefaultMessageDistributor implements MessageDistributor {
 
+  protected static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
   /**
    * The duration of each profile period in milliseconds.
    */
   private long periodDurationMillis;
 
   /**
-   * Maintains the state of a profile which is unique to a profile/entity pair.
+   * A cache of active profiles.
+   *
+   * A profile will remain on the active cache as long as it continues to receive
+   * messages.  Once it has not received messages for a period of time, it is
+   * moved to the expired cache.
+   */
+  private transient Cache<String, ProfileBuilder> activeCache;
+
+  /**
+   * A cache of expired profiles.
+   *
+   * When a profile expires from the active cache, it is moved here for a
+   * period of time.  In the expired cache a profile can no longer receive
+   * new messages.  A profile waits on the expired cache so that the client
+   * can flush the state of the expired profile.  If the client does not flush
+   * the expired profiles, this state will be lost forever.
    */
-  private transient Cache<String, ProfileBuilder> profileCache;
+  private transient Cache<String, ProfileBuilder> expiredCache;
 
   /**
    * Create a new message distributor.
+   *
+   * @param periodDurationMillis The period duration in milliseconds.
+   * @param profileTimeToLiveMillis The time-to-live of a profile in milliseconds.
+   * @param maxNumberOfRoutes The max number of unique routes to maintain.  After this is exceeded,
+   *                          lesser-used routes will be evicted from the internal cache.
+   */
+  public DefaultMessageDistributor(
+          long periodDurationMillis,
+          long profileTimeToLiveMillis,
+          long maxNumberOfRoutes) {
+    this(periodDurationMillis, profileTimeToLiveMillis, maxNumberOfRoutes, Ticker.systemTicker());
+  }
+
+  /**
+   * Create a new message distributor.
+   *
    * @param periodDurationMillis The period duration in milliseconds.
-   * @param profileTimeToLiveMillis The TTL of a profile in milliseconds.
+   * @param profileTimeToLiveMillis The time-to-live of a profile in milliseconds.
+   * @param maxNumberOfRoutes The max number of unique routes to maintain.  After this is exceeded,
+   *                          lesser-used routes will be evicted from the internal cache.
+   * @param ticker The ticker used to drive time for the caches.  Only needs to be set for testing.
    */
-  public DefaultMessageDistributor(long periodDurationMillis, long profileTimeToLiveMillis) {
+  public DefaultMessageDistributor(
+          long periodDurationMillis,
+          long profileTimeToLiveMillis,
+          long maxNumberOfRoutes,
+          Ticker ticker) {
+
     if(profileTimeToLiveMillis < periodDurationMillis) {
       throw new IllegalStateException(format(
               "invalid configuration: expect profile TTL (%d) to be greater than period duration (%d)",
@@ -69,9 +124,23 @@ public class DefaultMessageDistributor implements MessageDistributor {
               periodDurationMillis));
     }
     this.periodDurationMillis = periodDurationMillis;
-    this.profileCache = CacheBuilder
+
+    // build the cache of active profiles
+    this.activeCache = CacheBuilder
             .newBuilder()
+            .maximumSize(maxNumberOfRoutes)
             .expireAfterAccess(profileTimeToLiveMillis, TimeUnit.MILLISECONDS)
+            .removalListener(new ActiveCacheRemovalListener())
+            .ticker(ticker)
+            .build();
+
+    // build the cache of expired profiles
+    this.expiredCache = CacheBuilder
+            .newBuilder()
+            .maximumSize(maxNumberOfRoutes)
+            .expireAfterWrite(profileTimeToLiveMillis, TimeUnit.MILLISECONDS)
+            .removalListener(new ExpiredCacheRemovalListener())
+            .ticker(ticker)
             .build();
   }
 
@@ -79,57 +148,120 @@ public class DefaultMessageDistributor implements MessageDistributor {
    * Distribute a message along a MessageRoute.
    *
   * @param message The message that needs to be distributed.
+   * @param timestamp The timestamp of the message.
    * @param route The message route.
    * @param context The Stellar execution context.
    * @throws ExecutionException
    */
   @Override
-  public void distribute(JSONObject message, MessageRoute route, Context context) throws ExecutionException {
-    getBuilder(route, context).apply(message);
+  public void distribute(JSONObject message, long timestamp, MessageRoute route, Context context) {
+    try {
+      ProfileBuilder builder = getBuilder(route, context);
+      builder.apply(message, timestamp);
+
+    } catch(ExecutionException e) {
+      LOG.error("Unexpected error", e);
+      throw new RuntimeException(e);
+    }
   }
 
   /**
-   * Flushes all profiles.  Flushes all ProfileBuilders that this distributor is responsible for.
+   * Flush all active profiles.
+   *
+   * <p>A profile will remain active as long as it continues to receive messages.  If a profile
+   * does not receive a message for an extended duration, it may be marked as expired.
+   *
+   * <p>Flushes all active {@link ProfileBuilder} objects that this distributor is responsible for.
    *
-   * @return The profile measurements; one for each (profile, entity) pair.
+   * @return The {@link ProfileMeasurement} values; one for each (profile, entity) pair.
    */
   @Override
   public List<ProfileMeasurement> flush() {
+
+    // cache maintenance needed here to ensure active profiles will expire
+    activeCache.cleanUp();
+    expiredCache.cleanUp();
+
+    List<ProfileMeasurement> measurements = flushCache(activeCache);
+    return measurements;
+  }
+
+  /**
+   * Flush all expired profiles.
+   *
+   * <p>Flushes all expired {@link ProfileBuilder}s that this distributor is responsible for.
+   *
+   * <p>If a profile has not received messages for an extended period of time, it will be marked as
+   * expired.  When a profile is expired, it can no longer receive new messages.  Expired profiles
+   * remain only to give the client a chance to flush them.
+   *
+   * <p>If the client does not flush the expired profiles periodically, any state maintained in the
+   * profile since the last flush may be lost.
+   *
+   * @return The {@link ProfileMeasurement} values; one for each (profile, entity) pair.
+   */
+  @Override
+  public List<ProfileMeasurement> flushExpired() {
+
+    // cache maintenance needed here to ensure active profiles will expire
+    activeCache.cleanUp();
+    expiredCache.cleanUp();
+
+    // flush all expired profiles
+    List<ProfileMeasurement> measurements = flushCache(expiredCache);
+
+    // once the expired profiles have been flushed, they are no longer needed
+    expiredCache.invalidateAll();
+
+    return measurements;
+  }
+
+  /**
+   * Flush all of the profiles maintained in a cache.
+   *
+   * @param cache The cache to flush.
+   * @return The measurements captured when flushing the profiles.
+   */
+  private List<ProfileMeasurement> flushCache(Cache<String, ProfileBuilder> cache) {
+
     List<ProfileMeasurement> measurements = new ArrayList<>();
+    for(ProfileBuilder profileBuilder: cache.asMap().values()) {
 
-    profileCache.asMap().forEach((key, profileBuilder) -> {
+      // only need to flush if the profile has been initialized
       if(profileBuilder.isInitialized()) {
+
+        // flush the profiler and save the measurement, if one exists
         Optional<ProfileMeasurement> measurement = profileBuilder.flush();
-        measurement.ifPresent(measurements::add);
+        measurement.ifPresent(m -> measurements.add(m));
       }
-    });
+    }
 
-    profileCache.cleanUp();
     return measurements;
   }
 
   /**
    * Retrieves the cached ProfileBuilder that is used to build and maintain the Profile.  If none exists,
    * one will be created and returned.
+   *
    * @param route The message route.
    * @param context The Stellar execution context.
    */
   public ProfileBuilder getBuilder(MessageRoute route, Context context) throws ExecutionException {
     ProfileConfig profile = route.getProfileDefinition();
     String entity = route.getEntity();
-    return profileCache.get(
+    return activeCache.get(
             cacheKey(profile, entity),
             () -> new DefaultProfileBuilder.Builder()
                     .withDefinition(profile)
                     .withEntity(entity)
                     .withPeriodDurationMillis(periodDurationMillis)
                     .withContext(context)
-                    .withClock(new WallClock())
                     .build());
   }
 
   /**
-   * Builds the key that is used to lookup the ProfileState within the cache.
+   * Builds the key that is used to lookup the {@link ProfileBuilder} within the cache.
+   *
    * @param profile The profile definition.
    * @param entity The entity.
    */
@@ -145,4 +277,33 @@ public class DefaultMessageDistributor implements MessageDistributor {
   public DefaultMessageDistributor withPeriodDuration(int duration, TimeUnit units) {
     return withPeriodDurationMillis(units.toMillis(duration));
   }
+
+  /**
+   * A listener that is notified when profiles expire from the active cache.
+   */
+  private class ActiveCacheRemovalListener implements RemovalListener<String, ProfileBuilder> {
+
+    @Override
+    public void onRemoval(RemovalNotification<String, ProfileBuilder> notification) {
+
+      String key = notification.getKey();
+      ProfileBuilder expired = notification.getValue();
+
+      LOG.warn("Profile expired from active cache; key={}", key);
+      expiredCache.put(key, expired);
+    }
+  }
+
+  /**
+   * A listener that is notified when profiles are removed from the expired cache.
+   */
+  private class ExpiredCacheRemovalListener implements RemovalListener<String, ProfileBuilder> {
+
+    @Override
+    public void onRemoval(RemovalNotification<String, ProfileBuilder> notification) {
+
+      String key = notification.getKey();
+      LOG.debug("Profile removed from expired cache; key={}", key);
+    }
+  }
 }
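
As a rough sketch of the active/expired hand-off described in the class Javadoc above, using plain String values in place of ProfileBuilder; the Ticker parameter is what lets tests drive cache time, as the new constructor notes.

    import java.util.concurrent.TimeUnit;
    import com.google.common.base.Ticker;
    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.RemovalListener;

    public class TwoCacheSketch {
      private final Cache<String, String> expiredCache;
      private final Cache<String, String> activeCache;

      public TwoCacheSketch(long ttlMillis, long maxSize, Ticker ticker) {
        // expired entries linger for one TTL so a client can still flush them
        this.expiredCache = CacheBuilder.newBuilder()
                .maximumSize(maxSize)
                .expireAfterWrite(ttlMillis, TimeUnit.MILLISECONDS)
                .ticker(ticker)
                .build();
        // idle entries are handed from the active cache to the expired cache
        RemovalListener<String, String> onExpire =
                notification -> expiredCache.put(notification.getKey(), notification.getValue());
        this.activeCache = CacheBuilder.newBuilder()
                .maximumSize(maxSize)
                .expireAfterAccess(ttlMillis, TimeUnit.MILLISECONDS)
                .removalListener(onExpire)
                .ticker(ticker)
                .build();
      }
    }

Guava caches only evict during access or explicit maintenance, which is why flush() in the diff above calls cleanUp() on both caches before draining them.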

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/DefaultProfileBuilder.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/DefaultProfileBuilder.java b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/DefaultProfileBuilder.java
index 2e34160..4b564c9 100644
--- a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/DefaultProfileBuilder.java
+++ b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/DefaultProfileBuilder.java
@@ -20,7 +20,18 @@
 
 package org.apache.metron.profiler;
 
-import static java.lang.String.format;
+import org.apache.commons.collections4.ListUtils;
+import org.apache.commons.collections4.MapUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.metron.common.configuration.profiler.ProfileConfig;
+import org.apache.metron.stellar.common.DefaultStellarStatefulExecutor;
+import org.apache.metron.stellar.common.StellarStatefulExecutor;
+import org.apache.metron.stellar.dsl.Context;
+import org.apache.metron.stellar.dsl.ParseException;
+import org.apache.metron.stellar.dsl.StellarFunctions;
+import org.json.simple.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.Serializable;
 import java.lang.invoke.MethodHandles;
@@ -34,20 +45,8 @@ import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
-import org.apache.commons.collections4.ListUtils;
-import org.apache.commons.collections4.MapUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.metron.common.configuration.profiler.ProfileConfig;
-import org.apache.metron.profiler.clock.Clock;
-import org.apache.metron.profiler.clock.WallClock;
-import org.apache.metron.stellar.common.DefaultStellarStatefulExecutor;
-import org.apache.metron.stellar.common.StellarStatefulExecutor;
-import org.apache.metron.stellar.dsl.Context;
-import org.apache.metron.stellar.dsl.ParseException;
-import org.apache.metron.stellar.dsl.StellarFunctions;
-import org.json.simple.JSONObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+
+import static java.lang.String.format;
 
 /**
  * Responsible for building and maintaining a Profile.
@@ -94,16 +93,15 @@ public class DefaultProfileBuilder implements ProfileBuilder, Serializable {
   private long periodDurationMillis;
 
   /**
-   * A clock is used to tell time; imagine that.
+   * Tracks the latest timestamp for use when flushing the profile.
    */
-  private Clock clock;
+  private long maxTimestamp;
 
   /**
-   * Use the ProfileBuilder.Builder to create a new ProfileBuilder.
+   * Private constructor.  Use the {@link Builder} to create a new {@link ProfileBuilder}.
    */
   private DefaultProfileBuilder(ProfileConfig definition,
                                 String entity,
-                                Clock clock,
                                 long periodDurationMillis,
                                 Context stellarContext) {
 
@@ -111,27 +109,37 @@ public class DefaultProfileBuilder implements ProfileBuilder, Serializable {
     this.definition = definition;
     this.profileName = definition.getProfile();
     this.entity = entity;
-    this.clock = clock;
     this.periodDurationMillis = periodDurationMillis;
     this.executor = new DefaultStellarStatefulExecutor();
     StellarFunctions.initialize(stellarContext);
     this.executor.setContext(stellarContext);
+    this.maxTimestamp = 0;
   }
 
   /**
    * Apply a message to the profile.
+   *
    * @param message The message to apply.
+   * @param timestamp The timestamp of the message.
    */
   @Override
-  public void apply(JSONObject message) {
+  public void apply(JSONObject message, long timestamp) {
     try {
       if (!isInitialized()) {
+
+        // execute each 'init' expression
         assign(definition.getInit(), message, "init");
         isInitialized = true;
       }
 
+      // execute each 'update' expression
       assign(definition.getUpdate(), message, "update");
 
+      // keep track of the 'latest' timestamp seen for use when flushing the profile
+      if(timestamp > maxTimestamp) {
+        maxTimestamp = timestamp;
+      }
+
     } catch(Throwable e) {
       LOG.error(format("Unable to apply message to profile: %s", e.getMessage()), e);
     }
@@ -140,23 +148,30 @@ public class DefaultProfileBuilder implements ProfileBuilder, Serializable {
   /**
    * Flush the Profile.
    *
-   * Completes and emits the ProfileMeasurement.  Clears all state in preparation for
+   * <p>Completes and emits the {@link ProfileMeasurement}.  Clears all state in preparation for
    * the next window period.
    *
-   * @return Returns the completed profile measurement.
+   * @return Returns the completed {@link ProfileMeasurement}.
    */
   @Override
   public Optional<ProfileMeasurement> flush() {
-    LOG.debug("Flushing profile: profile={}, entity={}", profileName, entity);
-    Optional<ProfileMeasurement> result = Optional.empty();
-    ProfilePeriod period = new ProfilePeriod(clock.currentTimeMillis(), periodDurationMillis, TimeUnit.MILLISECONDS);
+
+    Optional<ProfileMeasurement> result;
+    ProfilePeriod period = new ProfilePeriod(maxTimestamp, periodDurationMillis, TimeUnit.MILLISECONDS);
 
     try {
-      // execute the 'profile' expression(s)
-      Object profileValue = execute(definition.getResult().getProfileExpressions().getExpression(), "result/profile");
+      // execute the 'profile' expression
+      String profileExpression = definition
+              .getResult()
+              .getProfileExpressions()
+              .getExpression();
+      Object profileValue = execute(profileExpression, "result/profile");
 
       // execute the 'triage' expression(s)
-      Map<String, Object> triageValues = definition.getResult().getTriageExpressions().getExpressions()
+      Map<String, Object> triageValues = definition
+              .getResult()
+              .getTriageExpressions()
+              .getExpressions()
               .entrySet()
               .stream()
               .collect(Collectors.toMap(
@@ -185,10 +200,21 @@ public class DefaultProfileBuilder implements ProfileBuilder, Serializable {
               .withDefinition(definition));
 
     } catch(Throwable e) {
+
       // if any of the Stellar expressions fail, a measurement should NOT be returned
       LOG.error(format("Unable to flush profile: error=%s", e.getMessage()), e);
+      result = Optional.empty();
     }
 
+    LOG.debug("Flushed profile: profile={}, entity={}, maxTime={}, period={}, start={}, end={}, duration={}",
+            profileName,
+            entity,
+            maxTimestamp,
+            period.getPeriod(),
+            period.getStartTimeMillis(),
+            period.getEndTimeMillis(),
+            period.getDurationMillis());
+
     isInitialized = false;
     return result;
   }
@@ -214,6 +240,7 @@ public class DefaultProfileBuilder implements ProfileBuilder, Serializable {
 
   /**
    * Executes an expression contained within the profile definition.
+   *
    * @param expression The expression to execute.
    * @param transientState Additional transient state provided to the expression.
    * @param expressionType The type of expression; init, update, result.  Provides additional context if expression execution fails.
@@ -232,6 +259,7 @@ public class DefaultProfileBuilder implements ProfileBuilder, Serializable {
 
   /**
    * Executes an expression contained within the profile definition.
+   *
    * @param expression The expression to execute.
    * @param expressionType The type of expression; init, update, result.  Provides additional context if expression execution fails.
    * @return The result of executing the expression.
@@ -242,6 +270,7 @@ public class DefaultProfileBuilder implements ProfileBuilder, Serializable {
 
   /**
    * Executes a set of expressions whose results need to be assigned to a variable.
+   *
    * @param expressions Maps the name of a variable to the expression whose result should be assigned to it.
    * @param transientState Additional transient state provided to the expression.
    * @param expressionType The type of expression; init, update, result.  Provides additional context if expression execution fails.
@@ -254,6 +283,7 @@ public class DefaultProfileBuilder implements ProfileBuilder, Serializable {
       String expr = entry.getValue();
 
       try {
+
         // assign the result of the expression to the variable
         executor.assign(var, expr, transientState);
 
@@ -274,6 +304,7 @@ public class DefaultProfileBuilder implements ProfileBuilder, Serializable {
 
   /**
    * Executes the expressions contained within the profile definition.
+   *
    * @param expressions A list of expressions to execute.
    * @param transientState Additional transient state provided to the expressions.
    * @param expressionType The type of expression; init, update, result.  Provides additional context if expression execution fails.
@@ -284,6 +315,7 @@ public class DefaultProfileBuilder implements ProfileBuilder, Serializable {
 
     for(String expr: ListUtils.emptyIfNull(expressions)) {
       try {
+
         // execute an expression
         Object result = executor.execute(expr, transientState, Object.class);
         results.add(result);
@@ -305,15 +337,19 @@ public class DefaultProfileBuilder implements ProfileBuilder, Serializable {
     return results;
   }
 
+  @Override
+  public String getEntity() {
+    return entity;
+  }
+
   /**
-   * A builder used to construct a new ProfileBuilder.
+   * A builder should be used to construct a new {@link ProfileBuilder} object.
    */
   public static class Builder {
 
     private ProfileConfig definition;
     private String entity;
-    private long periodDurationMillis;
-    private Clock clock = new WallClock();
+    private Long periodDurationMillis;
     private Context context;
 
     public Builder withContext(Context context) {
@@ -321,11 +357,6 @@ public class DefaultProfileBuilder implements ProfileBuilder, Serializable {
       return this;
     }
 
-    public Builder withClock(Clock clock) {
-      this.clock = clock;
-      return this;
-    }
-
     /**
      * @param definition The profiler definition.
      */
@@ -370,8 +401,11 @@ public class DefaultProfileBuilder implements ProfileBuilder, Serializable {
       if(StringUtils.isEmpty(entity)) {
         throw new IllegalArgumentException(format("missing entity name; got '%s'", entity));
       }
+      if(periodDurationMillis == null) {
+        throw new IllegalArgumentException("missing period duration");
+      }
 
-      return new DefaultProfileBuilder(definition, entity, clock, periodDurationMillis, context);
+      return new DefaultProfileBuilder(definition, entity, periodDurationMillis, context);
     }
   }
 }
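
A small sketch of the new event-time behavior, assuming a profile definition and Stellar context supplied from elsewhere; the entity and period values below are made up for illustration. The flushed period is now derived from the largest timestamp applied, not from a wall clock.

    import java.util.Optional;
    import org.apache.metron.common.configuration.profiler.ProfileConfig;
    import org.apache.metron.profiler.DefaultProfileBuilder;
    import org.apache.metron.profiler.ProfileBuilder;
    import org.apache.metron.profiler.ProfileMeasurement;
    import org.apache.metron.stellar.dsl.Context;
    import org.json.simple.JSONObject;

    public class EventTimeFlushSketch {
      public static Optional<ProfileMeasurement> applyAndFlush(ProfileConfig definition,
                                                               Context context,
                                                               JSONObject message,
                                                               long eventTimeMillis) {
        ProfileBuilder builder = new DefaultProfileBuilder.Builder()
                .withDefinition(definition)
                .withEntity("10.0.0.1")             // hypothetical entity
                .withPeriodDurationMillis(900_000)  // hypothetical 15 minute period
                .withContext(context)
                .build();
        builder.apply(message, eventTimeMillis);    // the timestamp travels with the message
        return builder.flush();                     // period derived from maxTimestamp
      }
    }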

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/MessageDistributor.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/MessageDistributor.java b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/MessageDistributor.java
index a60446f..ea5be0f 100644
--- a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/MessageDistributor.java
+++ b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/MessageDistributor.java
@@ -24,33 +24,57 @@ import org.apache.metron.stellar.dsl.Context;
 import org.json.simple.JSONObject;
 
 import java.util.List;
-import java.util.concurrent.ExecutionException;
 
 /**
- * Distributes a message along a MessageRoute.  A MessageRoute will lead to one or
- * more ProfileBuilders.
+ * Distributes a telemetry message along a {@link MessageRoute}. A {@link MessageRoute} will lead to a
+ * {@link ProfileBuilder} that is responsible for building and maintaining a profile.
  *
- * A ProfileBuilder is responsible for maintaining the state of a single profile,
- * for a single entity.  There will be one ProfileBuilder for each (profile, entity) pair.
- * This class ensures that each ProfileBuilder receives the telemetry messages that
- * it needs.
+ * <p>A {@link ProfileBuilder} is responsible for maintaining the state of a single (profile, entity)
+ * pairing.  There will be one {@link ProfileBuilder} for each (profile, entity) pair.
+ *
+ * <p>A {@link MessageDistributor} ensures that each {@link ProfileBuilder} receives the telemetry
+ * messages that it needs.
+ *
+ * @see MessageRoute
+ * @see ProfileMeasurement
  */
 public interface MessageDistributor {
 
   /**
-   * Distribute a message along a MessageRoute.
+   * Distribute a message along a {@link MessageRoute}.
    *
+   * @param message The message that needs to be distributed.
+   * @param timestamp The timestamp of the message.
    * @param route The message route.
    * @param context The Stellar execution context.
-   * @throws ExecutionException
    */
-  void distribute(JSONObject message, MessageRoute route, Context context) throws ExecutionException;
+  void distribute(JSONObject message, long timestamp, MessageRoute route, Context context);
 
   /**
-   * Flushes all profiles.  Flushes all ProfileBuilders that this distributor is responsible for.
+   * Flush all active profiles.
+   *
+   * <p>A profile will remain active as long as it continues to receive messages.  If a profile
+   * does not receive a message for an extended duration, it may be marked as expired.
    *
-   * @return The profile measurements; one for each (profile, entity) pair.
+   * <p>Flushes all active {@link ProfileBuilder} objects that this distributor is responsible for.
+   *
+   * @return The {@link ProfileMeasurement} values; one for each (profile, entity) pair.
    */
   List<ProfileMeasurement> flush();
+
+  /**
+   * Flush all expired profiles.
+   *
+   * <p>If a profile has not received messages for an extended period of time, it will be marked as
+   * expired.  When a profile is expired, it can no longer receive new messages.  Expired profiles
+   * remain only to give the client a chance to flush them.
+   *
+   * <p>If the client does not flush the expired profiles periodically, any state maintained in the
+   * profile since the last flush may be lost.
+   *
+   * <p>Flushes all expired {@link ProfileBuilder} objects that this distributor is responsible for.
+   *
+   * @return The {@link ProfileMeasurement} values; one for each (profile, entity) pair.
+   */
+  List<ProfileMeasurement> flushExpired();
 }

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/MessageRoute.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/MessageRoute.java b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/MessageRoute.java
index 1945671..7288f03 100644
--- a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/MessageRoute.java
+++ b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/MessageRoute.java
@@ -23,12 +23,15 @@ package org.apache.metron.profiler;
 import org.apache.metron.common.configuration.profiler.ProfileConfig;
 
 /**
- * A MessageRoute defines the profile and entity that a telemetry message needs applied to.  This
- * allows a message to be routed to the profile and entity that needs it.
+ * Defines the 'route' a message must take through the Profiler.
  *
- * One telemetry message may need multiple routes.  This is the case when a message is needed by
- * more than one profile.  In this case, there will be multiple MessageRoute objects for a single
- * message.
+ * <p>A {@link MessageRoute} defines the profile and entity that a telemetry message needs applied to.
+ *
+ * <p>If a message is needed by multiple profiles, then multiple {@link MessageRoute} values
+ * will exist.  If a message is not needed by any profiles, then no {@link MessageRoute} values
+ * will exist.
+ *
+ * @see MessageRouter
  */
 public class MessageRoute {
 
@@ -42,6 +45,12 @@ public class MessageRoute {
    */
   private String entity;
 
+  /**
+   * Create a {@link MessageRoute}.
+   *
+   * @param profileDefinition The profile definition.
+   * @param entity The entity.
+   */
   public MessageRoute(ProfileConfig profileDefinition, String entity) {
     this.entity = entity;
     this.profileDefinition = profileDefinition;

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/MessageRouter.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/MessageRouter.java b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/MessageRouter.java
index 99c98a3..4c18062 100644
--- a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/MessageRouter.java
+++ b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/MessageRouter.java
@@ -27,15 +27,18 @@ import org.json.simple.JSONObject;
 import java.util.List;
 
 /**
- * Routes incoming telemetry messages.
+ * Routes incoming telemetry messages through the Profiler.
  *
- * A single telemetry message may need to take multiple routes.  This is the case
- * when a message is needed by more than one profile.
+ * <p>If a message is needed by multiple profiles, then multiple {@link MessageRoute} values
+ * will be returned.  If a message is not needed by any profiles, then no {@link MessageRoute} values
+ * will be returned.
+ *
+ * @see MessageRoute
  */
 public interface MessageRouter {
 
   /**
-   * Route a telemetry message.  Finds all routes for a given telemetry message.
+   * Finds all routes for a telemetry message.
    *
   * @param message The telemetry message that needs to be routed.
    * @param config The configuration for the Profiler.
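
A short sketch of the routing contract, assuming the DefaultMessageRouter used by StandAloneProfiler later in this commit: an empty result means no profile needs the message, while several routes mean several profiles will receive it.

    import java.util.List;
    import org.apache.metron.common.configuration.profiler.ProfilerConfig;
    import org.apache.metron.profiler.DefaultMessageRouter;
    import org.apache.metron.profiler.MessageRoute;
    import org.apache.metron.profiler.MessageRouter;
    import org.apache.metron.stellar.dsl.Context;
    import org.json.simple.JSONObject;

    public class RouterSketch {
      public static List<MessageRoute> routesFor(ProfilerConfig config,
                                                 Context context,
                                                 JSONObject message) {
        MessageRouter router = new DefaultMessageRouter(context);
        // an empty list drops the message; otherwise one route per interested profile
        return router.route(message, config, context);
      }
    }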

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/ProfileBuilder.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/ProfileBuilder.java b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/ProfileBuilder.java
index c09b0b6..07372d7 100644
--- a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/ProfileBuilder.java
+++ b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/ProfileBuilder.java
@@ -28,47 +28,61 @@ import java.util.Optional;
 /**
  * Responsible for building and maintaining a Profile.
  *
- * One or more messages are applied to the Profile with `apply` and a profile measurement is
- * produced by calling `flush`.
+ * <p>Telemetry messages are applied to a profile using {@link ProfileBuilder#apply(JSONObject, long)}.  A
+ * {@link ProfileMeasurement} is generated by calling {@link ProfileBuilder#flush()}.
  *
- * Any one instance is responsible only for building the profile for a specific [profile, entity]
- * pairing.  There will exist many instances, one for each [profile, entity] pair that exists
+ * <p>A {@link ProfileBuilder} is responsible only for building the profile for a specific [profile, entity]
+ * pair.  Many instances will exist, one for each [profile, entity] pair that exists
  * within the incoming telemetry data applied to the profile.
  */
 public interface ProfileBuilder {
 
   /**
    * Apply a message to the profile.
+   *
    * @param message The message to apply.
+   * @param timestamp The timestamp of the message.
    */
-  void apply(JSONObject message);
+  void apply(JSONObject message, long timestamp);
 
   /**
    * Flush the Profile.
    *
-   * Completes and emits the ProfileMeasurement.  Clears all state in preparation for
+   * <p>Completes the period and returns the {@link ProfileMeasurement}.  Clears all state in preparation for
    * the next window period.
    *
-   * @return Returns the completed profile measurement.
+   * @return Returns the {@link ProfileMeasurement}.
    */
   Optional<ProfileMeasurement> flush();
 
   /**
-   * Has the ProfileBuilder been initialized?
+   * Has the {@link ProfileBuilder} been initialized?
+   *
    * @return True, if initialization has occurred.  False, otherwise.
    */
   boolean isInitialized();
 
   /**
    * Returns the definition of the profile being built.
-   * @return ProfileConfig definition of the profile
+   *
+   * @return The profile definition.
    */
   ProfileConfig getDefinition();
 
   /**
-   * Returns the value of a variable being maintained by the builder.
+   * Returns the value of a variable within the current profile state.
+   *
    * @param variable The variable name.
    * @return The value of the variable.
    */
   Object valueOf(String variable);
+
+  /**
+   * Returns the name of the entity.
+   *
+   * <p>Each {@code ProfileBuilder} instance is responsible for one (profile, entity) pair.
+   *
+   * @return The entity.
+   */
+  String getEntity();
 }

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/ProfileMeasurement.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/ProfileMeasurement.java b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/ProfileMeasurement.java
index 0e773e9..f6cc286 100644
--- a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/ProfileMeasurement.java
+++ b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/ProfileMeasurement.java
@@ -28,10 +28,10 @@ import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
 /**
- * Represents a single data point within a Profile.
+ * Represents a single data point within a profile.
  *
- * A Profile is effectively a time series.  To this end a Profile is composed
- * of many ProfileMeasurement values which in aggregate form a time series.
+ * <p>A profile contains many individual {@link ProfileMeasurement} values captured over a
+ * period of time.  These values in aggregate form a time series.
  */
 public class ProfileMeasurement {
 

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/StandAloneProfiler.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/StandAloneProfiler.java b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/StandAloneProfiler.java
index 6db7079..f79efe6 100644
--- a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/StandAloneProfiler.java
+++ b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/StandAloneProfiler.java
@@ -21,18 +21,29 @@
 package org.apache.metron.profiler;
 
 import org.apache.metron.common.configuration.profiler.ProfilerConfig;
+import org.apache.metron.profiler.clock.Clock;
+import org.apache.metron.profiler.clock.ClockFactory;
+import org.apache.metron.profiler.clock.DefaultClockFactory;
 import org.apache.metron.stellar.dsl.Context;
 import org.json.simple.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+import java.lang.invoke.MethodHandles;
 import java.util.List;
+import java.util.Optional;
 import java.util.concurrent.ExecutionException;
 
 /**
- * A stand alone version of the Profiler that does not require a
- * distributed execution environment like Apache Storm.
+ * A stand alone version of the Profiler that does not require a distributed
+ * execution environment like Apache Storm.
+ *
+ * <p>This class is used to create and manage profiles within the REPL environment.
  */
 public class StandAloneProfiler {
 
+  protected static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
   /**
    * The Stellar execution context.
    */
@@ -54,6 +65,11 @@ public class StandAloneProfiler {
   private MessageDistributor distributor;
 
   /**
+   * The factory that creates Clock objects.
+   */
+  private ClockFactory clockFactory;
+
+  /**
    * Counts the number of messages that have been applied.
    */
   private int messageCount;
@@ -67,12 +83,26 @@ public class StandAloneProfiler {
    */
   private int routeCount;
 
-  public StandAloneProfiler(ProfilerConfig config, long periodDurationMillis, Context context) {
+  /**
+   * Create a new Profiler.
+   *
+   * @param config The Profiler configuration.
+   * @param periodDurationMillis The period duration in milliseconds.
+   * @param profileTimeToLiveMillis The time-to-live of a profile in milliseconds.
+   * @param maxNumberOfRoutes The max number of unique routes to maintain.  After this is exceeded,
+   *                          lesser-used routes will be evicted from the internal cache.
+   * @param context The Stellar execution context.
+   */
+  public StandAloneProfiler(ProfilerConfig config,
+                            long periodDurationMillis,
+                            long profileTimeToLiveMillis,
+                            long maxNumberOfRoutes,
+                            Context context) {
     this.context = context;
     this.config = config;
     this.router = new DefaultMessageRouter(context);
-    // the period TTL does not matter in this context
-    this.distributor = new DefaultMessageDistributor(periodDurationMillis, Long.MAX_VALUE);
+    this.distributor = new DefaultMessageDistributor(periodDurationMillis, profileTimeToLiveMillis, maxNumberOfRoutes);
+    this.clockFactory = new DefaultClockFactory();
     this.messageCount = 0;
     this.routeCount = 0;
   }
@@ -80,26 +110,28 @@ public class StandAloneProfiler {
   /**
    * Apply a message to a set of profiles.
    * @param message The message to apply.
-   * @throws ExecutionException
    */
-  public void apply(JSONObject message) throws ExecutionException {
+  public void apply(JSONObject message) {
 
-    List<MessageRoute> routes = router.route(message, config, context);
-    for(MessageRoute route : routes) {
-      distributor.distribute(message, route, context);
-    }
+    // what time is it?
+    Clock clock = clockFactory.createClock(config);
+    Optional<Long> timestamp = clock.currentTimeMillis(message);
 
-    routeCount += routes.size();
-    messageCount += 1;
-  }
+    // can only route the message if we have a timestamp
+    if(timestamp.isPresent()) {
 
-  @Override
-  public String toString() {
-    return "Profiler{" +
-            getProfileCount() + " profile(s), " +
-            getMessageCount() + " messages(s), " +
-            getRouteCount() + " route(s)" +
-            '}';
+      // route the message to the correct profile builders
+      List<MessageRoute> routes = router.route(message, config, context);
+      for (MessageRoute route : routes) {
+        distributor.distribute(message, timestamp.get(), route, context);
+      }
+
+      routeCount += routes.size();
+      messageCount += 1;
+
+    } else {
+      LOG.warn("No timestamp available for the message. The message will be ignored.");
+    }
   }
 
   /**
@@ -110,19 +142,45 @@ public class StandAloneProfiler {
     return distributor.flush();
   }
 
+  /**
+   * Returns the Profiler configuration.
+   * @return The Profiler configuration.
+   */
   public ProfilerConfig getConfig() {
     return config;
   }
 
+  /**
+   * Returns the number of defined profiles.
+   * @return The number of defined profiles.
+   */
   public int getProfileCount() {
     return (config == null) ? 0: config.getProfiles().size();
   }
 
+  /**
+   * Returns the number of messages that have been applied.
+   * @return The number of messages that have been applied.
+   */
   public int getMessageCount() {
     return messageCount;
   }
 
+  /**
+   * Returns the number of routes.
+   * @return The number of routes.
+   * @see MessageRoute
+   */
   public int getRouteCount() {
     return routeCount;
   }
+
+  @Override
+  public String toString() {
+    return "Profiler{" +
+            getProfileCount() + " profile(s), " +
+            getMessageCount() + " message(s), " +
+            getRouteCount() + " route(s)" +
+            '}';
+  }
 }
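
The guard in apply() above is the heart of the change; below is a minimal sketch of the same gate, assuming the config and message come from the caller.

    import java.util.Optional;
    import org.apache.metron.common.configuration.profiler.ProfilerConfig;
    import org.apache.metron.profiler.clock.Clock;
    import org.apache.metron.profiler.clock.ClockFactory;
    import org.apache.metron.profiler.clock.DefaultClockFactory;
    import org.json.simple.JSONObject;

    public class TimestampGateSketch {
      private final ClockFactory clockFactory = new DefaultClockFactory();

      // empty means the message carries no usable timestamp and should be skipped,
      // mirroring the warn-and-ignore branch in StandAloneProfiler.apply above
      public Optional<Long> timestampOf(ProfilerConfig config, JSONObject message) {
        Clock clock = clockFactory.createClock(config);
        return clock.currentTimeMillis(message);
      }
    }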

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/Clock.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/Clock.java b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/Clock.java
index 6730e49..b07c0ed 100644
--- a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/Clock.java
+++ b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/Clock.java
@@ -20,16 +20,24 @@
 
 package org.apache.metron.profiler.clock;
 
+import org.json.simple.JSONObject;
+
+import java.util.Optional;
+
 /**
- * A clock can tell time; imagine that.
+ * A {@link Clock} manages the progression of time in the Profiler.
  *
- * This allows the Profiler to support different treatments of time like wall clock versus event time.
+ * <p>The Profiler can operate on either processing time or event time.  This
+ * abstraction deals with the differences between the two.
  */
 public interface Clock {
 
   /**
-   * The current time in epoch milliseconds.
+   * Returns the current time in epoch milliseconds.
+   *
+   * @param message The telemetry message.
+   * @return An optional value containing the current time in epoch milliseconds, if
+   *         the current time is known.  Otherwise, empty.
    */
-  long currentTimeMillis();
-
+  Optional<Long> currentTimeMillis(JSONObject message);
 }
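
A minimal implementation sketch of the new contract, reading a hypothetical "timestamp" field and returning empty when it is absent or malformed; the real event-time implementation is the EventTimeClock added below.

    import java.util.Optional;
    import org.apache.metron.profiler.clock.Clock;
    import org.json.simple.JSONObject;

    public class FieldClockSketch implements Clock {
      @Override
      public Optional<Long> currentTimeMillis(JSONObject message) {
        Object value = message.get("timestamp");  // hypothetical field name
        return (value instanceof Number)
                ? Optional.of(((Number) value).longValue())
                : Optional.empty();
      }
    }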

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/ClockFactory.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/ClockFactory.java b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/ClockFactory.java
new file mode 100644
index 0000000..5435c48
--- /dev/null
+++ b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/ClockFactory.java
@@ -0,0 +1,38 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+package org.apache.metron.profiler.clock;
+
+import org.apache.metron.common.configuration.profiler.ProfilerConfig;
+
+/**
+ * A factory for creating {@link Clock} objects.
+ *
+ * The type of {@link Clock} needed will depend on the Profiler configuration.
+ */
+public interface ClockFactory {
+
+  /**
+   * Creates and returns a {@link Clock}.
+   *
+   * @param config The profiler configuration.
+   * @return A {@link Clock}.
+   */
+  Clock createClock(ProfilerConfig config);
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/DefaultClockFactory.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/DefaultClockFactory.java b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/DefaultClockFactory.java
new file mode 100644
index 0000000..d62e62b
--- /dev/null
+++ b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/DefaultClockFactory.java
@@ -0,0 +1,57 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+package org.apache.metron.profiler.clock;
+
+import org.apache.metron.common.configuration.profiler.ProfilerConfig;
+
+/**
+ * Creates a {@link Clock} based on the profiler configuration.
+ *
+ * <p>If the Profiler is configured to use event time, an {@link EventTimeClock} will
+ * be created.  Otherwise, a {@link WallClock} will be created.
+ *
+ * <p>The default implementation of a {@link ClockFactory}.
+ */
+public class DefaultClockFactory implements ClockFactory {
+
+  /**
+   * @param config The profiler configuration.
+   * @return The appropriate Clock based on the profiler configuration.
+   */
+  @Override
+  public Clock createClock(ProfilerConfig config) {
+    Clock clock;
+
+    boolean isEventTime = config.getTimestampField().isPresent();
+    if(isEventTime) {
+
+      // using event time
+      String timestampField = config.getTimestampField().get();
+      clock = new EventTimeClock(timestampField);
+
+    } else {
+
+      // using processing time
+      clock = new WallClock();
+    }
+
+    return clock;
+  }
+}
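
A brief usage sketch of the factory; the helper method is illustrative only, and the ProfilerConfig and message are assumed to come from elsewhere:

    import java.util.Optional;

    import org.apache.metron.common.configuration.profiler.ProfilerConfig;
    import org.json.simple.JSONObject;

    public class ClockFactorySketch {

      public static Optional<Long> timeOf(ProfilerConfig config, JSONObject message) {
        // a configured timestamp field selects event time; otherwise wall clock
        ClockFactory factory = new DefaultClockFactory();
        Clock clock = factory.createClock(config);
        return clock.currentTimeMillis(message);
      }
    }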

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/EventTimeClock.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/EventTimeClock.java b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/EventTimeClock.java
new file mode 100644
index 0000000..5cd574e
--- /dev/null
+++ b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/EventTimeClock.java
@@ -0,0 +1,72 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+package org.apache.metron.profiler.clock;
+
+import org.apache.metron.stellar.common.utils.ConversionUtils;
+import org.json.simple.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.invoke.MethodHandles;
+import java.util.Optional;
+
+/**
+ * A {@link Clock} that advances based on event time.
+ *
+ * Event time is advanced by the timestamps contained within telemetry messages, rather
+ * than the system clock.
+ */
+public class EventTimeClock implements Clock {
+
+  protected static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  /**
+   * The name of the field from which the timestamp will be extracted.
+   */
+  private String timestampField;
+
+  /**
+   * @param timestampField The name of the field containing a timestamp.
+   */
+  public EventTimeClock(String timestampField) {
+    this.timestampField = timestampField;
+  }
+
+  @Override
+  public Optional<Long> currentTimeMillis(JSONObject message) {
+
+    Long result;
+    if(message != null && message.containsKey(timestampField)) {
+
+      // extract the timestamp and convert to a long
+      Object timestamp = message.get(timestampField);
+      result = ConversionUtils.convert(timestamp, Long.class);
+
+    } else {
+
+      // the message does not contain the specified timestamp field
+      LOG.debug("message does not contain timestamp field '{}': message will be ignored: message='{}'",
+              timestampField, JSONObject.toJSONString(message));
+      result = null;
+    }
+
+    return Optional.ofNullable(result);
+  }
+}
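
To illustrate the two paths above (the field name and timestamp value here are arbitrary):

    import java.util.Optional;

    import org.json.simple.JSONObject;

    public class EventTimeClockSketch {

      @SuppressWarnings("unchecked")
      public static void main(String[] args) {
        EventTimeClock clock = new EventTimeClock("timestamp");

        // a message carrying the timestamp field yields its value
        JSONObject message = new JSONObject();
        message.put("timestamp", 1503081070340L);
        Optional<Long> present = clock.currentTimeMillis(message);           // Optional[1503081070340]

        // a message missing the field is logged at debug and yields empty
        Optional<Long> missing = clock.currentTimeMillis(new JSONObject());  // Optional.empty

        System.out.println(present + " / " + missing);
      }
    }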

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/FixedClock.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/FixedClock.java b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/FixedClock.java
index c6e93cd..8259ed0 100644
--- a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/FixedClock.java
+++ b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/FixedClock.java
@@ -20,21 +20,50 @@
 
 package org.apache.metron.profiler.clock;
 
-import java.io.Serializable;
+import org.json.simple.JSONObject;
+
+import java.util.Optional;
 
 /**
- * A clock that reports whatever time you tell it to.  Most useful for testing.
+ * A {@link Clock} that always reports the same time.
+ *
+ * <p>This is only useful for testing.
  */
-public class FixedClock implements Clock, Serializable {
+public class FixedClock implements Clock {
 
+  /**
+   * The time in milliseconds since the epoch.
+   */
   private long epochMillis;
 
+  /**
+   * Create a {@link Clock}.  The time defaults to the epoch.
+   */
+  public FixedClock() {
+    this(0);
+  }
+
+  /**
+   * Create a {@link Clock}.
+   * @param epochMillis The time in milliseconds since the epoch.
+   */
+  public FixedClock(long epochMillis) {
+    this.setTime(epochMillis);
+  }
+
+  /**
+   * Set the current time.
+   * @param epochMillis The time in milliseconds since the epoch.
+   */
   public void setTime(long epochMillis) {
     this.epochMillis = epochMillis;
   }
 
+  /**
+   * @return The time in milliseconds since the epoch.
+   */
   @Override
-  public long currentTimeMillis() {
-    return this.epochMillis;
+  public Optional<Long> currentTimeMillis(JSONObject message) {
+    return Optional.of(this.epochMillis);
   }
 }
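
A small sketch of the fixed clock in a test; the message argument is ignored entirely:

    import java.util.Optional;

    public class FixedClockSketch {

      public static void main(String[] args) {
        FixedClock clock = new FixedClock(100);
        Optional<Long> first = clock.currentTimeMillis(null);   // Optional[100]

        // move the fixed time forward, as a test would between assertions
        clock.setTime(200);
        Optional<Long> second = clock.currentTimeMillis(null);  // Optional[200]

        System.out.println(first + " / " + second);
      }
    }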

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/FixedClockFactory.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/FixedClockFactory.java b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/FixedClockFactory.java
new file mode 100644
index 0000000..b0248cd
--- /dev/null
+++ b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/FixedClockFactory.java
@@ -0,0 +1,44 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+package org.apache.metron.profiler.clock;
+
+import org.apache.metron.common.configuration.profiler.ProfilerConfig;
+
+/**
+ * A {@link ClockFactory} that always returns a {@link FixedClock}.
+ *
+ * <p>A {@link FixedClock} always returns the same time and is only useful for testing.
+ */
+public class FixedClockFactory implements ClockFactory {
+
+  private long timestamp;
+
+  /**
+   * @param timestamp The timestamp that all {@link Clock} objects created by this factory will report.
+   */
+  public FixedClockFactory(long timestamp) {
+    this.timestamp = timestamp;
+  }
+
+  @Override
+  public Clock createClock(ProfilerConfig config) {
+    return new FixedClock(timestamp);
+  }
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/WallClock.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/WallClock.java b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/WallClock.java
index 1a20c94..20f62e3 100644
--- a/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/WallClock.java
+++ b/metron-analytics/metron-profiler-common/src/main/java/org/apache/metron/profiler/clock/WallClock.java
@@ -20,15 +20,22 @@
 
 package org.apache.metron.profiler.clock;
 
-import java.io.Serializable;
+import org.json.simple.JSONObject;
+
+import java.util.Optional;
 
 /**
- * A clock that uses the system clock to provide wall clock time.
+ * A {@link Clock} that advances based on system time.
+ *
+ * <p>This {@link Clock} is used to advance time when the Profiler is running
+ * on processing time, rather than event time.
  */
-public class WallClock implements Clock, Serializable {
+public class WallClock implements Clock {
 
   @Override
-  public long currentTimeMillis() {
-    return System.currentTimeMillis();
+  public Optional<Long> currentTimeMillis(JSONObject message) {
+
+    // the message does not matter; use system time
+    return Optional.of(System.currentTimeMillis());
   }
 }

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/DefaultMessageDistributorTest.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/DefaultMessageDistributorTest.java b/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/DefaultMessageDistributorTest.java
index ff4c289..ea9c5c6 100644
--- a/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/DefaultMessageDistributorTest.java
+++ b/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/DefaultMessageDistributorTest.java
@@ -20,6 +20,7 @@
 
 package org.apache.metron.profiler;
 
+import com.google.common.base.Ticker;
 import org.adrianwalker.multilinestring.Multiline;
 import org.apache.metron.common.configuration.profiler.ProfileConfig;
 import org.apache.metron.common.utils.JSONUtils;
@@ -33,6 +34,9 @@ import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 
+import static java.util.concurrent.TimeUnit.HOURS;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.MINUTES;
 import static org.junit.Assert.assertEquals;
 
 public class DefaultMessageDistributorTest {
@@ -83,16 +87,22 @@ public class DefaultMessageDistributorTest {
 
   private DefaultMessageDistributor distributor;
   private Context context;
+  private long periodDurationMillis = MINUTES.toMillis(15);
+  private long profileTimeToLiveMillis = MINUTES.toMillis(30);
+  private long maxNumberOfRoutes = Long.MAX_VALUE;
 
   @Before
   public void setup() throws Exception {
+
     context = Context.EMPTY_CONTEXT();
     JSONParser parser = new JSONParser();
     messageOne = (JSONObject) parser.parse(inputOne);
     messageTwo = (JSONObject) parser.parse(inputTwo);
+
     distributor = new DefaultMessageDistributor(
-            TimeUnit.MINUTES.toMillis(15),
-            TimeUnit.MINUTES.toMillis(30));
+            periodDurationMillis,
+            profileTimeToLiveMillis,
+            maxNumberOfRoutes);
   }
 
   /**
@@ -108,15 +118,18 @@ public class DefaultMessageDistributorTest {
    */
   @Test
   public void testDistribute() throws Exception {
+
+    // setup
+    long timestamp = 100;
     ProfileConfig definition = createDefinition(profileOne);
     String entity = (String) messageOne.get("ip_src_addr");
     MessageRoute route = new MessageRoute(definition, entity);
 
-    // distribute one message
-    distributor.distribute(messageOne, route, context);
+    // distribute one message and flush
+    distributor.distribute(messageOne, timestamp, route, context);
+    List<ProfileMeasurement> measurements = distributor.flush();
 
     // expect one measurement coming from one profile
-    List<ProfileMeasurement> measurements = distributor.flush();
     assertEquals(1, measurements.size());
     ProfileMeasurement m = measurements.get(0);
     assertEquals(definition.getProfile(), m.getProfileName());
@@ -126,12 +139,17 @@ public class DefaultMessageDistributorTest {
   @Test
   public void testDistributeWithTwoProfiles() throws Exception {
 
-    // distribute one message to the first profile
+    // setup
+    long timestamp = 100;
     String entity = (String) messageOne.get("ip_src_addr");
-    distributor.distribute(messageOne, new MessageRoute(createDefinition(profileOne), entity), context);
+
+    // distribute one message to the first profile
+    MessageRoute routeOne = new MessageRoute(createDefinition(profileOne), entity);
+    distributor.distribute(messageOne, timestamp, routeOne, context);
 
     // distribute another message to the second profile, but same entity
-    distributor.distribute(messageOne, new MessageRoute(createDefinition(profileTwo), entity), context);
+    MessageRoute routeTwo = new MessageRoute(createDefinition(profileTwo), entity);
+    distributor.distribute(messageOne, timestamp, routeTwo, context);
 
     // expect 2 measurements; 1 for each profile
     List<ProfileMeasurement> measurements = distributor.flush();
@@ -141,17 +159,150 @@ public class DefaultMessageDistributorTest {
   @Test
   public void testDistributeWithTwoEntities() throws Exception {
 
+    // setup
+    long timestamp = 100;
+
     // distribute one message
     String entityOne = (String) messageOne.get("ip_src_addr");
-    distributor.distribute(messageOne, new MessageRoute(createDefinition(profileOne), entityOne), context);
+    MessageRoute routeOne = new MessageRoute(createDefinition(profileOne), entityOne);
+    distributor.distribute(messageOne, timestamp, routeOne, context);
 
     // distribute another message with a different entity
     String entityTwo = (String) messageTwo.get("ip_src_addr");
-    distributor.distribute(messageTwo, new MessageRoute(createDefinition(profileTwo), entityTwo), context);
+    MessageRoute routeTwo =  new MessageRoute(createDefinition(profileTwo), entityTwo);
+    distributor.distribute(messageTwo, timestamp, routeTwo, context);
 
     // expect 2 measurements; 1 for each entity
     List<ProfileMeasurement> measurements = distributor.flush();
     assertEquals(2, measurements.size());
   }
 
+  /**
+   * A profile should expire after a fixed period of time.  This test ensures that
+   * profiles are not expired before they are supposed to be.
+   */
+  @Test
+  public void testNotYetTimeToExpireProfiles() throws Exception {
+
+    // the ticker drives time to allow us to test cache expiration
+    FixedTicker ticker = new FixedTicker();
+
+    // setup
+    ProfileConfig definition = createDefinition(profileOne);
+    String entity = (String) messageOne.get("ip_src_addr");
+    MessageRoute route = new MessageRoute(definition, entity);
+    distributor = new DefaultMessageDistributor(
+            periodDurationMillis,
+            profileTimeToLiveMillis,
+            maxNumberOfRoutes,
+            ticker);
+
+    // distribute one message
+    distributor.distribute(messageOne, 1000000, route, context);
+
+    // advance time to just shy of the profile TTL
+    ticker.advanceTime(profileTimeToLiveMillis - 1000, MILLISECONDS);
+
+    // the profile should NOT have expired yet
+    assertEquals(0, distributor.flushExpired().size());
+    assertEquals(1, distributor.flush().size());
+  }
+
+  /**
+   * A profile should expire after a fixed period of time.
+   */
+  @Test
+  public void testProfilesShouldExpire() throws Exception {
+
+    // the ticker drives time to allow us to test cache expiration
+    FixedTicker ticker = new FixedTicker();
+
+    // setup
+    ProfileConfig definition = createDefinition(profileOne);
+    String entity = (String) messageOne.get("ip_src_addr");
+    MessageRoute route = new MessageRoute(definition, entity);
+    distributor = new DefaultMessageDistributor(
+            periodDurationMillis,
+            profileTimeToLiveMillis,
+            maxNumberOfRoutes,
+            ticker);
+
+    // distribute one message
+    distributor.distribute(messageOne, 100000, route, context);
+
+    // advance time to just beyond the profile time-to-live
+    ticker.advanceTime(profileTimeToLiveMillis + 1000, MILLISECONDS);
+
+    // the profile should have expired by now
+    assertEquals(1, distributor.flushExpired().size());
+    assertEquals(0, distributor.flush().size());
+  }
+
+  /**
+   * An expired profile is only kept around for a fixed period of time.  It should be removed if it
+   * has been on the expired cache for too long.
+   */
+  @Test
+  public void testExpiredProfilesShouldBeRemoved() throws Exception {
+
+    // the ticker drives time to allow us to test cache expiration
+    FixedTicker ticker = new FixedTicker();
+
+    // setup
+    ProfileConfig definition = createDefinition(profileOne);
+    String entity = (String) messageOne.get("ip_src_addr");
+    MessageRoute route = new MessageRoute(definition, entity);
+    distributor = new DefaultMessageDistributor(
+            periodDurationMillis,
+            profileTimeToLiveMillis,
+            maxNumberOfRoutes,
+            ticker);
+
+    // distribute one message
+    distributor.distribute(messageOne, 1000000, route, context);
+
+    // advance time a couple of hours
+    ticker.advanceTime(2, HOURS);
+
+    // the profile should have expired, leaving nothing to flush
+    assertEquals(0, distributor.flush().size());
+
+    // advance time a couple of hours
+    ticker.advanceTime(2, HOURS);
+
+    // the profile should have been removed from the expired cache
+    assertEquals(0, distributor.flushExpired().size());
+  }
+
+  /**
+   * An implementation of Ticker that can be used to drive time
+   * when testing the Guava caches.
+   */
+  private class FixedTicker extends Ticker {
+
+    /**
+     * The time that will be reported.
+     */
+    private long timestampNanos;
+
+    public FixedTicker() {
+      this.timestampNanos = Ticker.systemTicker().read();
+    }
+
+    public FixedTicker startAt(long timestampNanos) {
+      this.timestampNanos = timestampNanos;
+      return this;
+    }
+
+    public FixedTicker advanceTime(long time, TimeUnit units) {
+      this.timestampNanos += units.toNanos(time);
+      return this;
+    }
+
+    @Override
+    public long read() {
+      return this.timestampNanos;
+    }
+  }
+
 }
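
The FixedTicker above exists so the tests can drive Guava's cache expiry deterministically instead of sleeping. A standalone sketch of that mechanism, independent of the distributor (class and key names are illustrative):

    import java.util.concurrent.TimeUnit;

    import com.google.common.base.Ticker;
    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;

    public class TickerExpirySketch {

      // equivalent in spirit to the FixedTicker used by the test
      static class ManualTicker extends Ticker {
        private long nanos = Ticker.systemTicker().read();

        void advance(long time, TimeUnit unit) {
          nanos += unit.toNanos(time);
        }

        @Override
        public long read() {
          return nanos;
        }
      }

      public static void main(String[] args) {
        ManualTicker ticker = new ManualTicker();
        Cache<String, String> cache = CacheBuilder.newBuilder()
                .ticker(ticker)
                .expireAfterWrite(30, TimeUnit.MINUTES)
                .build();

        cache.put("profile-route", "state");
        ticker.advance(31, TimeUnit.MINUTES);

        // the entry has outlived its time-to-live, so it is gone
        System.out.println(cache.getIfPresent("profile-route"));  // null
      }
    }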

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/DefaultProfileBuilderTest.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/DefaultProfileBuilderTest.java b/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/DefaultProfileBuilderTest.java
index d25b7ff..24eb5f8 100644
--- a/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/DefaultProfileBuilderTest.java
+++ b/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/DefaultProfileBuilderTest.java
@@ -23,8 +23,6 @@ package org.apache.metron.profiler;
 import org.adrianwalker.multilinestring.Multiline;
 import org.apache.metron.common.configuration.profiler.ProfileConfig;
 import org.apache.metron.common.utils.JSONUtils;
-import org.apache.metron.profiler.clock.Clock;
-import org.apache.metron.profiler.clock.FixedClock;
 import org.apache.metron.stellar.dsl.Context;
 import org.json.simple.JSONObject;
 import org.json.simple.parser.JSONParser;
@@ -82,7 +80,9 @@ public class DefaultProfileBuilderTest {
    */
   @Test
   public void testInit() throws Exception {
+
     // setup
+    long timestamp = 100;
     definition = JSONUtils.INSTANCE.load(testInitProfile, ProfileConfig.class);
     builder = new DefaultProfileBuilder.Builder()
             .withDefinition(definition)
@@ -92,7 +92,7 @@ public class DefaultProfileBuilderTest {
             .build();
 
     // execute
-    builder.apply(message);
+    builder.apply(message, timestamp);
     Optional<ProfileMeasurement> m = builder.flush();
     assertTrue(m.isPresent());
 
@@ -106,7 +106,9 @@ public class DefaultProfileBuilderTest {
    */
   @Test
   public void testInitWithNoMessage() throws Exception {
+
     // setup
+    long timestamp = 100;
     definition = JSONUtils.INSTANCE.load(testInitProfile, ProfileConfig.class);
     builder = new DefaultProfileBuilder.Builder()
             .withDefinition(definition)
@@ -146,7 +148,9 @@ public class DefaultProfileBuilderTest {
    */
   @Test
   public void testUpdate() throws Exception {
+
     // setup
+    long timestamp = 100;
     definition = JSONUtils.INSTANCE.load(testUpdateProfile, ProfileConfig.class);
     builder = new DefaultProfileBuilder.Builder()
             .withDefinition(definition)
@@ -158,7 +162,12 @@ public class DefaultProfileBuilderTest {
     // execute
     int count = 10;
     for(int i=0; i<count; i++) {
-      builder.apply(message);
+
+      // apply the message
+      builder.apply(message, timestamp);
+
+      // advance time
+      timestamp += 5;
     }
     Optional<ProfileMeasurement> m = builder.flush();
     assertTrue(m.isPresent());
@@ -183,7 +192,9 @@ public class DefaultProfileBuilderTest {
    */
   @Test
   public void testResult() throws Exception {
+
     // setup
+    long timestamp = 100;
     definition = JSONUtils.INSTANCE.load(testResultProfile, ProfileConfig.class);
     builder = new DefaultProfileBuilder.Builder()
             .withDefinition(definition)
@@ -193,7 +204,7 @@ public class DefaultProfileBuilderTest {
             .build();
 
     // execute
-    builder.apply(message);
+    builder.apply(message, timestamp);
     Optional<ProfileMeasurement> m = builder.flush();
     assertTrue(m.isPresent());
 
@@ -206,40 +217,38 @@ public class DefaultProfileBuilderTest {
    */
   @Test
   public void testProfilePeriodOnFlush() throws Exception {
-    // setup
-    FixedClock clock = new FixedClock();
-    clock.setTime(100);
 
+    // setup
+    long timestamp = 100;
     definition = JSONUtils.INSTANCE.load(testResultProfile, ProfileConfig.class);
     builder = new DefaultProfileBuilder.Builder()
             .withDefinition(definition)
             .withEntity("10.0.0.1")
             .withPeriodDuration(10, TimeUnit.MINUTES)
             .withContext(Context.EMPTY_CONTEXT())
-            .withClock(clock)
             .build();
 
     {
       // apply a message and flush
-      builder.apply(message);
+      builder.apply(message, timestamp);
       Optional<ProfileMeasurement> m = builder.flush();
       assertTrue(m.isPresent());
 
       // validate the profile period
-      ProfilePeriod expected = new ProfilePeriod(clock.currentTimeMillis(), 10, TimeUnit.MINUTES);
+      ProfilePeriod expected = new ProfilePeriod(timestamp, 10, TimeUnit.MINUTES);
       assertEquals(expected, m.get().getPeriod());
     }
     {
-      // advance time by at least one period - 10 minutes
-      clock.setTime(clock.currentTimeMillis() + TimeUnit.MINUTES.toMillis(10));
+      // advance time by one full period duration of 10 minutes
+      timestamp += TimeUnit.MINUTES.toMillis(10);
 
       // apply a message and flush again
-      builder.apply(message);
+      builder.apply(message, timestamp);
       Optional<ProfileMeasurement> m = builder.flush();
       assertTrue(m.isPresent());
 
       // validate the profile period
-      ProfilePeriod expected = new ProfilePeriod(clock.currentTimeMillis(), 10, TimeUnit.MINUTES);
+      ProfilePeriod expected = new ProfilePeriod(timestamp, 10, TimeUnit.MINUTES);
       assertEquals(expected, m.get().getPeriod());
     }
   }
@@ -262,7 +271,9 @@ public class DefaultProfileBuilderTest {
    */
   @Test
   public void testGroupBy() throws Exception {
+
     // setup
+    long timestamp = 100;
     definition = JSONUtils.INSTANCE.load(testGroupByProfile, ProfileConfig.class);
     builder = new DefaultProfileBuilder.Builder()
             .withDefinition(definition)
@@ -272,7 +283,7 @@ public class DefaultProfileBuilderTest {
             .build();
 
     // execute
-    builder.apply(message);
+    builder.apply(message, timestamp);
     Optional<ProfileMeasurement> m = builder.flush();
     assertTrue(m.isPresent());
 
@@ -300,23 +311,20 @@ public class DefaultProfileBuilderTest {
    */
   @Test
   public void testStateAvailableToGroupBy() throws Exception {
-    FixedClock clock = new FixedClock();
-    clock.setTime(1503081070340L);
-    long periodDurationMillis = TimeUnit.MINUTES.toMillis(10);
-    ProfilePeriod period = new ProfilePeriod(clock.currentTimeMillis(), 10, TimeUnit.MINUTES);
 
     // setup
+    long timestamp = 1503081070340L;
+    ProfilePeriod period = new ProfilePeriod(timestamp, 10, TimeUnit.MINUTES);
     definition = JSONUtils.INSTANCE.load(testStateAvailableToGroupBy, ProfileConfig.class);
     builder = new DefaultProfileBuilder.Builder()
             .withDefinition(definition)
             .withEntity("10.0.0.1")
             .withPeriodDuration(10, TimeUnit.MINUTES)
             .withContext(Context.EMPTY_CONTEXT())
-            .withClock(clock)
             .build();
 
     // execute
-    builder.apply(message);
+    builder.apply(message, timestamp);
     Optional<ProfileMeasurement> m = builder.flush();
     assertTrue(m.isPresent());
 
@@ -350,7 +358,9 @@ public class DefaultProfileBuilderTest {
 
   @Test
   public void testFlushDoesNotClearState() throws Exception {
+
     // setup
+    long timestamp = 100;
     definition = JSONUtils.INSTANCE.load(testFlushProfile, ProfileConfig.class);
     builder = new DefaultProfileBuilder.Builder()
             .withDefinition(definition)
@@ -362,16 +372,24 @@ public class DefaultProfileBuilderTest {
     // execute - accumulate some state then flush it
     int count = 10;
     for(int i=0; i<count; i++) {
-      builder.apply(message);
+
+      // apply the message
+      builder.apply(message, timestamp);
+
+      // advance time
+      timestamp += 5;
     }
     builder.flush();
 
+    // advance time beyond the current period
+    timestamp += TimeUnit.MINUTES.toMillis(20);
+
     // apply another message to accumulate new state, then flush again to validate the original state was retained
-    builder.apply(message);
+    builder.apply(message, timestamp);
     Optional<ProfileMeasurement> m = builder.flush();
-    assertTrue(m.isPresent());
 
     // validate
+    assertTrue(m.isPresent());
     assertEquals(33, m.get().getProfileValue());
   }
 
@@ -395,7 +413,9 @@ public class DefaultProfileBuilderTest {
 
   @Test
   public void testFlushDoesNotClearStateButInitDoes() throws Exception {
+
     // setup
+    long timestamp = 100;
     definition = JSONUtils.INSTANCE.load(testFlushProfileWithNaiveInit, ProfileConfig.class);
     builder = new DefaultProfileBuilder.Builder()
             .withDefinition(definition)
@@ -407,18 +427,27 @@ public class DefaultProfileBuilderTest {
     // execute - accumulate some state then flush it
     int count = 10;
     for(int i=0; i<count; i++) {
-      builder.apply(message);
+
+      // apply a message
+      builder.apply(message, timestamp);
+
+      // advance time
+      timestamp += 5;
     }
     builder.flush();
 
+    // advance time beyond the current period
+    timestamp += TimeUnit.MINUTES.toMillis(20);
+
     // apply another message to accumulate new state, then flush again to validate original state was cleared
-    builder.apply(message);
+    builder.apply(message, timestamp);
     Optional<ProfileMeasurement> m = builder.flush();
     assertTrue(m.isPresent());
 
     // validate
     assertEquals(3, m.get().getProfileValue());
   }
+
   /**
    * {
    *   "profile": "test",
@@ -434,7 +463,9 @@ public class DefaultProfileBuilderTest {
    */
   @Test
   public void testEntity() throws Exception {
+
     // setup
+    long timestamp = 100;
     final String entity = "10.0.0.1";
     definition = JSONUtils.INSTANCE.load(testFlushProfile, ProfileConfig.class);
     builder = new DefaultProfileBuilder.Builder()
@@ -445,7 +476,7 @@ public class DefaultProfileBuilderTest {
             .build();
 
     // execute
-    builder.apply(message);
+    builder.apply(message, timestamp);
     Optional<ProfileMeasurement> m = builder.flush();
     assertTrue(m.isPresent());
 
@@ -473,7 +504,9 @@ public class DefaultProfileBuilderTest {
    */
   @Test
   public void testResultWithProfileExpression() throws Exception {
+
     // setup
+    long timestamp = 100;
     definition = JSONUtils.INSTANCE.load(testResultWithProfileExpression, ProfileConfig.class);
     builder = new DefaultProfileBuilder.Builder()
             .withDefinition(definition)
@@ -483,7 +516,7 @@ public class DefaultProfileBuilderTest {
             .build();
 
     // execute
-    builder.apply(message);
+    builder.apply(message, timestamp);
     Optional<ProfileMeasurement> m = builder.flush();
     assertTrue(m.isPresent());
 
@@ -515,7 +548,9 @@ public class DefaultProfileBuilderTest {
    */
   @Test
   public void testResultWithTriageExpression() throws Exception {
+
     // setup
+    long timestamp = 100;
     definition = JSONUtils.INSTANCE.load(testResultWithTriageExpression, ProfileConfig.class);
     builder = new DefaultProfileBuilder.Builder()
             .withDefinition(definition)
@@ -525,7 +560,7 @@ public class DefaultProfileBuilderTest {
             .build();
 
     // execute
-    builder.apply(message);
+    builder.apply(message, timestamp);
     Optional<ProfileMeasurement> m = builder.flush();
     assertTrue(m.isPresent());
 
@@ -550,7 +585,9 @@ public class DefaultProfileBuilderTest {
 
   @Test
   public void testBadInitExpression() throws Exception {
+
     // setup
+    long timestamp = 100;
     definition = JSONUtils.INSTANCE.load(badInitProfile, ProfileConfig.class);
     builder = new DefaultProfileBuilder.Builder()
             .withDefinition(definition)
@@ -560,7 +597,7 @@ public class DefaultProfileBuilderTest {
             .build();
 
     // due to the bad expression, there should be no result
-    builder.apply(message);
+    builder.apply(message, timestamp);
     assertFalse(builder.flush().isPresent());
   }
 
@@ -579,7 +616,9 @@ public class DefaultProfileBuilderTest {
 
   @Test
   public void testBadResultExpression() throws Exception {
+
     // setup
+    long timestamp = 100;
     definition = JSONUtils.INSTANCE.load(badSimpleResultProfile, ProfileConfig.class);
     builder = new DefaultProfileBuilder.Builder()
             .withDefinition(definition)
@@ -589,7 +628,7 @@ public class DefaultProfileBuilderTest {
             .build();
 
     // due to the bad expression, there should be no result
-    builder.apply(message);
+    builder.apply(message, timestamp);
     assertFalse(builder.flush().isPresent());
   }
 
@@ -608,7 +647,9 @@ public class DefaultProfileBuilderTest {
 
   @Test
   public void testBadGroupByExpression() throws Exception {
+
     // setup
+    long timestamp = 100;
     definition = JSONUtils.INSTANCE.load(badGroupByProfile, ProfileConfig.class);
     builder = new DefaultProfileBuilder.Builder()
             .withDefinition(definition)
@@ -618,7 +659,7 @@ public class DefaultProfileBuilderTest {
             .build();
 
     // due to the bad expression, there should be no result
-    builder.apply(message);
+    builder.apply(message, timestamp);
     assertFalse(builder.flush().isPresent());
   }
 
@@ -641,7 +682,9 @@ public class DefaultProfileBuilderTest {
 
   @Test
   public void testBadResultProfileExpression() throws Exception {
+
     // setup
+    long timestamp = 100;
     definition = JSONUtils.INSTANCE.load(badResultProfile, ProfileConfig.class);
     builder = new DefaultProfileBuilder.Builder()
             .withDefinition(definition)
@@ -651,7 +694,7 @@ public class DefaultProfileBuilderTest {
             .build();
 
     // due to the bad expression, there should be no result
-    builder.apply(message);
+    builder.apply(message, timestamp);
     assertFalse(builder.flush().isPresent());
   }
 
@@ -674,7 +717,9 @@ public class DefaultProfileBuilderTest {
 
   @Test
   public void testBadResultTriageExpression() throws Exception {
+
     // setup
+    long timestamp = 100;
     definition = JSONUtils.INSTANCE.load(badResultTriage, ProfileConfig.class);
     builder = new DefaultProfileBuilder.Builder()
             .withDefinition(definition)
@@ -684,7 +729,7 @@ public class DefaultProfileBuilderTest {
             .build();
 
     // due to the bad expression, there should be no result
-    builder.apply(message);
+    builder.apply(message, timestamp);
     assertFalse(builder.flush().isPresent());
   }
 
@@ -707,7 +752,9 @@ public class DefaultProfileBuilderTest {
    */
   @Test
   public void testBadUpdateExpression() throws Exception {
+
     // setup
+    long timestamp = 100;
     definition = JSONUtils.INSTANCE.load(badUpdateProfile, ProfileConfig.class);
     builder = new DefaultProfileBuilder.Builder()
             .withDefinition(definition)
@@ -717,7 +764,7 @@ public class DefaultProfileBuilderTest {
             .build();
 
     // execute
-    builder.apply(message);
+    builder.apply(message, timestamp);
 
     // if the update expression fails, the profile should still flush.
     Optional<ProfileMeasurement> m = builder.flush();

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/ProfilePeriodTest.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/ProfilePeriodTest.java b/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/ProfilePeriodTest.java
index 3a51ea4..1a72111 100644
--- a/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/ProfilePeriodTest.java
+++ b/metron-analytics/metron-profiler-common/src/test/java/org/apache/metron/profiler/ProfilePeriodTest.java
@@ -20,7 +20,6 @@
 
 package org.apache.metron.profiler;
 
-import org.apache.metron.profiler.ProfilePeriod;
 import org.junit.Test;
 
 import java.util.concurrent.TimeUnit;


[49/50] [abbrv] metron git commit: Merge remote-tracking branch 'origin/master' into feature/METRON-1416-upgrade-solr

Posted by rm...@apache.org.
http://git-wip-us.apache.org/repos/asf/metron/blob/d0a4e4c0/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrSearchDaoTest.java
----------------------------------------------------------------------
diff --cc metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrSearchDaoTest.java
index 762a272,0000000..9f2414a
mode 100644,000000..100644
--- a/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrSearchDaoTest.java
+++ b/metron-platform/metron-solr/src/test/java/org/apache/metron/solr/dao/SolrSearchDaoTest.java
@@@ -1,478 -1,0 +1,478 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.metron.solr.dao;
 +
 +import org.apache.metron.common.Constants;
 +import org.apache.metron.indexing.dao.AccessConfig;
 +import org.apache.metron.indexing.dao.search.GetRequest;
 +import org.apache.metron.indexing.dao.search.Group;
 +import org.apache.metron.indexing.dao.search.GroupOrder;
 +import org.apache.metron.indexing.dao.search.GroupRequest;
 +import org.apache.metron.indexing.dao.search.GroupResponse;
 +import org.apache.metron.indexing.dao.search.GroupResult;
 +import org.apache.metron.indexing.dao.search.InvalidSearchException;
 +import org.apache.metron.indexing.dao.search.SearchRequest;
 +import org.apache.metron.indexing.dao.search.SearchResponse;
 +import org.apache.metron.indexing.dao.search.SearchResult;
 +import org.apache.metron.indexing.dao.search.SortField;
 +import org.apache.metron.indexing.dao.update.Document;
 +import org.apache.metron.solr.matcher.ModifiableSolrParamsMatcher;
 +import org.apache.metron.solr.matcher.SolrQueryMatcher;
 +import org.apache.solr.client.solrj.SolrClient;
 +import org.apache.solr.client.solrj.SolrQuery;
 +import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 +import org.apache.solr.client.solrj.response.FacetField;
 +import org.apache.solr.client.solrj.response.FieldStatsInfo;
 +import org.apache.solr.client.solrj.response.PivotField;
 +import org.apache.solr.client.solrj.response.QueryResponse;
 +import org.apache.solr.common.SolrDocument;
 +import org.apache.solr.common.SolrDocumentList;
 +import org.apache.solr.common.params.ModifiableSolrParams;
 +import org.apache.solr.common.util.NamedList;
 +import org.junit.Before;
 +import org.junit.Rule;
 +import org.junit.Test;
 +import org.junit.rules.ExpectedException;
 +import org.junit.runner.RunWith;
 +import org.powermock.core.classloader.annotations.PrepareForTest;
 +import org.powermock.modules.junit4.PowerMockRunner;
 +
 +import java.util.Arrays;
 +import java.util.Collection;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Optional;
 +
 +import static org.hamcrest.MatcherAssert.assertThat;
 +import static org.hamcrest.core.IsCollectionContaining.hasItems;
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertNull;
 +import static org.mockito.Matchers.any;
 +import static org.mockito.Matchers.argThat;
 +import static org.mockito.Mockito.doReturn;
 +import static org.mockito.Mockito.mock;
 +import static org.mockito.Mockito.spy;
 +import static org.mockito.Mockito.verify;
 +import static org.mockito.Mockito.verifyNoMoreInteractions;
 +import static org.mockito.Mockito.when;
 +import static org.powermock.api.mockito.PowerMockito.mockStatic;
 +
 +@RunWith(PowerMockRunner.class)
 +@PrepareForTest({CollectionAdminRequest.class})
 +public class SolrSearchDaoTest {
 +
 +  @Rule
 +  public final ExpectedException exception = ExpectedException.none();
 +
 +  private SolrClient client;
 +  private AccessConfig accessConfig;
 +  private SolrSearchDao solrSearchDao;
 +
 +  @SuppressWarnings("unchecked")
 +  @Before
 +  public void setUp() throws Exception {
 +    client = mock(SolrClient.class);
 +    accessConfig = mock(AccessConfig.class);
 +    solrSearchDao = new SolrSearchDao(client, accessConfig);
 +    mockStatic(CollectionAdminRequest.class);
 +    when(CollectionAdminRequest.listCollections(client)).thenReturn(Arrays.asList("bro", "snort"));
 +  }
 +
 +  @Test
 +  public void searchShouldProperlyReturnSearchResponse() throws Exception {
 +    SearchRequest searchRequest = mock(SearchRequest.class);
 +    SearchResponse searchResponse = mock(SearchResponse.class);
 +    SolrQuery solrQuery = mock(SolrQuery.class);
 +    QueryResponse queryResponse = mock(QueryResponse.class);
 +
 +    solrSearchDao = spy(new SolrSearchDao(client, accessConfig));
 +    when(searchRequest.getQuery()).thenReturn("query");
 +    doReturn(solrQuery).when(solrSearchDao).buildSearchRequest(searchRequest);
 +    when(client.query(solrQuery)).thenReturn(queryResponse);
 +    doReturn(searchResponse).when(solrSearchDao).buildSearchResponse(searchRequest, queryResponse);
 +
 +    assertEquals(searchResponse, solrSearchDao.search(searchRequest));
 +    verify(solrSearchDao).buildSearchRequest(searchRequest);
 +    verify(client).query(solrQuery);
 +    verify(solrSearchDao).buildSearchResponse(searchRequest, queryResponse);
 +    verifyNoMoreInteractions(client);
 +  }
 +
 +  @Test
 +  public void searchShouldThrowInvalidSearchExceptionOnEmptyQuery() throws Exception {
 +    exception.expect(InvalidSearchException.class);
 +    exception.expectMessage("Search query is invalid: null");
 +
 +    solrSearchDao.search(new SearchRequest());
 +  }
 +
 +  @Test
 +  public void searchShouldThrowInvalidSearchExceptionOnEmptyClient() throws Exception {
 +    exception.expect(InvalidSearchException.class);
 +    exception.expectMessage("Uninitialized Dao!  You must call init() prior to use.");
 +
 +    SearchRequest searchRequest = new SearchRequest();
 +    searchRequest.setQuery("query");
 +    new SolrSearchDao(null, accessConfig).search(searchRequest);
 +  }
 +
 +  @Test
 +  public void searchShouldThrowSearchResultSizeException() throws Exception {
 +    exception.expect(InvalidSearchException.class);
 +    exception.expectMessage("Search result size must be less than 100");
 +
 +    when(accessConfig.getMaxSearchResults()).thenReturn(100);
 +    SearchRequest searchRequest = new SearchRequest();
 +    searchRequest.setQuery("query");
 +    searchRequest.setSize(200);
 +    solrSearchDao.search(searchRequest);
 +  }
 +
 +  @Test
 +  public void groupShouldProperlyReturnGroupResponse() throws Exception {
 +    GroupRequest groupRequest = mock(GroupRequest.class);
 +    QueryResponse queryResponse = mock(QueryResponse.class);
 +    GroupResponse groupResponse = mock(GroupResponse.class);
 +
 +    solrSearchDao = spy(new SolrSearchDao(client, accessConfig));
 +    Group group1 = new Group();
 +    group1.setField("field1");
 +    Group group2 = new Group();
 +    group2.setField("field2");
 +    when(groupRequest.getQuery()).thenReturn("query");
 +    when(groupRequest.getGroups()).thenReturn(Arrays.asList(group1, group2));
 +    when(groupRequest.getScoreField()).thenReturn(Optional.of("scoreField"));
 +    when(groupRequest.getIndices()).thenReturn(Arrays.asList("bro", "snort"));
 +    when(client.query(any())).thenReturn(queryResponse);
 +    doReturn(groupResponse).when(solrSearchDao).buildGroupResponse(groupRequest, queryResponse);
 +    SolrQuery expectedSolrQuery = new SolrQuery()
 +            .setStart(0)
 +            .setRows(0)
 +            .setQuery("query");
 +    expectedSolrQuery.set("collection", "bro,snort");
 +    expectedSolrQuery.set("stats", true);
 +    expectedSolrQuery.set("stats.field", "{!tag=piv1 sum=true}scoreField");
 +    expectedSolrQuery.set("facet", true);
 +    expectedSolrQuery.set("facet.pivot", "{!stats=piv1}field1,field2");
 +
 +    assertEquals(groupResponse, solrSearchDao.group(groupRequest));
 +    verify(client).query(argThat(new SolrQueryMatcher(expectedSolrQuery)));
 +    verify(solrSearchDao).buildGroupResponse(groupRequest, queryResponse);
 +
 +    verifyNoMoreInteractions(client);
 +  }
 +
 +  @Test
 +  public void getLatestShouldProperlyReturnDocument() throws Exception {
 +    SolrDocument solrDocument = mock(SolrDocument.class);
 +    Document document = mock(Document.class);
 +
 +    solrSearchDao = spy(new SolrSearchDao(client, accessConfig));
 +    when(client.getById("collection", "guid")).thenReturn(solrDocument);
 +    doReturn(document).when(solrSearchDao).toDocument(solrDocument);
 +
 +    assertEquals(document, solrSearchDao.getLatest("guid", "collection"));
 +
 +    verify(client).getById("collection", "guid");
 +    verify(solrSearchDao).toDocument(solrDocument);
 +    verifyNoMoreInteractions(client);
 +  }
 +
 +  @Test
 +  public void getAllLatestShouldProperlyReturnDocuments() throws Exception {
 +    GetRequest broRequest1 = new GetRequest("bro-1", "bro");
 +    GetRequest broRequest2 = new GetRequest("bro-2", "bro");
 +    GetRequest snortRequest1 = new GetRequest("snort-1", "snort");
 +    GetRequest snortRequest2 = new GetRequest("snort-2", "snort");
 +    SolrDocument broSolrDoc1 = mock(SolrDocument.class);
 +    SolrDocument broSolrDoc2 = mock(SolrDocument.class);
 +    SolrDocument snortSolrDoc1 = mock(SolrDocument.class);
 +    SolrDocument snortSolrDoc2 = mock(SolrDocument.class);
 +    Document broDoc1 = mock(Document.class);
 +    Document broDoc2 = mock(Document.class);
 +    Document snortDoc1 = mock(Document.class);
 +    Document snortDoc2 = mock(Document.class);
 +
 +    solrSearchDao = spy(new SolrSearchDao(client, accessConfig));
 +    doReturn(broDoc1).when(solrSearchDao).toDocument(broSolrDoc1);
 +    doReturn(broDoc2).when(solrSearchDao).toDocument(broSolrDoc2);
 +    doReturn(snortDoc1).when(solrSearchDao).toDocument(snortSolrDoc1);
 +    doReturn(snortDoc2).when(solrSearchDao).toDocument(snortSolrDoc2);
 +    SolrDocumentList broList = new SolrDocumentList();
 +    broList.add(broSolrDoc1);
 +    broList.add(broSolrDoc2);
 +    SolrDocumentList snortList = new SolrDocumentList();
 +    snortList.add(snortSolrDoc1);
 +    snortList.add(snortSolrDoc2);
 +    when(client.getById((Collection<String>) argThat(hasItems("bro-1", "bro-2")),
 +            argThat(new ModifiableSolrParamsMatcher(new ModifiableSolrParams().set("collection", "bro"))))).thenReturn(broList);
 +    when(client.getById((Collection<String>) argThat(hasItems("snort-1", "snort-2")),
 +            argThat(new ModifiableSolrParamsMatcher(new ModifiableSolrParams().set("collection", "snort"))))).thenReturn(snortList);
 +    assertEquals(Arrays.asList(broDoc1, broDoc2, snortDoc1, snortDoc2), solrSearchDao.getAllLatest(Arrays.asList(broRequest1, broRequest2, snortRequest1, snortRequest2)));
 +  }
 +
 +  @Test
 +  public void buildSearchRequestShouldReturnSolrQuery() throws Exception {
 +    SearchRequest searchRequest = new SearchRequest();
 +    searchRequest.setIndices(Arrays.asList("bro", "snort"));
 +    searchRequest.setSize(5);
 +    searchRequest.setFrom(10);
 +    searchRequest.setQuery("query");
 +    SortField sortField = new SortField();
 +    sortField.setField("sortField");
 +    sortField.setSortOrder("ASC");
 +    searchRequest.setSort(Collections.singletonList(sortField));
 +    searchRequest.setFields(Arrays.asList("field1", "field2"));
 +    searchRequest.setFacetFields(Arrays.asList("facetField1", "facetField2"));
 +
 +    SolrQuery expectedSolrQuery = new SolrQuery()
 +            .setStart(10)
 +            .setRows(5)
 +            .setQuery("query")
 +            .addSort("sortField", SolrQuery.ORDER.asc)
 +            .addField("field1").addField("field2")
 +            .addFacetField("facetField1", "facetField2");
 +    expectedSolrQuery.set("collection", "bro,snort");
 +
 +    SolrQuery solrQuery = solrSearchDao.buildSearchRequest(searchRequest);
 +    assertThat(solrQuery, new SolrQueryMatcher(expectedSolrQuery));
 +  }
 +
 +  @Test
 +  public void buildSearchResponseShouldReturnSearchResponse() throws Exception {
 +    SearchRequest searchRequest = new SearchRequest();
 +    searchRequest.setFields(Collections.singletonList("id"));
 +    searchRequest.setFacetFields(Collections.singletonList("facetField"));
 +    QueryResponse queryResponse = mock(QueryResponse.class);
 +    SolrDocument solrDocument1 = mock(SolrDocument.class);
 +    SolrDocument solrDocument2 = mock(SolrDocument.class);
 +
 +    solrSearchDao = spy(new SolrSearchDao(client, accessConfig));
 +    SolrDocumentList solrDocumentList = new SolrDocumentList();
 +    solrDocumentList.add(solrDocument1);
 +    solrDocumentList.add(solrDocument2);
 +    solrDocumentList.setNumFound(100);
 +    when(queryResponse.getResults()).thenReturn(solrDocumentList);
 +    SearchResult searchResult1 = new SearchResult();
 +    searchResult1.setId("id1");
 +    SearchResult searchResult2 = new SearchResult();
 +    searchResult2.setId("id2");
 +    doReturn(searchResult1).when(solrSearchDao).getSearchResult(solrDocument1,
-             Optional.of(Collections.singletonList("id")));
++            Collections.singletonList("id"));
 +    doReturn(searchResult2).when(solrSearchDao).getSearchResult(solrDocument2,
-             Optional.of(Collections.singletonList("id")));
++            Collections.singletonList("id"));
 +    Map<String, Map<String, Long>> facetCounts = new HashMap<String, Map<String, Long>>() {{
 +      put("id", new HashMap<String, Long>() {{
 +        put("id1", 1L);
 +        put("id2", 1L);
 +      }});
 +    }};
 +    doReturn(facetCounts).when(solrSearchDao).getFacetCounts(Collections.singletonList("facetField"), queryResponse);
 +    SearchResponse expectedSearchResponse = new SearchResponse();
 +    SearchResult expectedSearchResult1 = new SearchResult();
 +    expectedSearchResult1.setId("id1");
 +    SearchResult expectedSearchResult2 = new SearchResult();
 +    expectedSearchResult2.setId("id2");
 +    expectedSearchResponse.setResults(Arrays.asList(expectedSearchResult1, expectedSearchResult2));
 +    expectedSearchResponse.setTotal(100);
 +    expectedSearchResponse.setFacetCounts(facetCounts);
 +
 +    assertEquals(expectedSearchResponse, solrSearchDao.buildSearchResponse(searchRequest, queryResponse));
 +  }
 +
 +  @Test
 +  public void getSearchResultShouldProperlyReturnResults() throws Exception {
 +    SolrDocument solrDocument = mock(SolrDocument.class);
 +
 +    when(solrDocument.getFieldValue(Constants.GUID)).thenReturn("guid");
 +    when(solrDocument.getFieldValue("field1")).thenReturn("value1");
 +    when(solrDocument.getFieldValue("field2")).thenReturn("value2");
 +    when(solrDocument.getFieldNames()).thenReturn(Arrays.asList("field1", "field2"));
 +
 +    SearchResult expectedSearchResult = new SearchResult();
 +    expectedSearchResult.setId("guid");
 +    expectedSearchResult.setSource(new HashMap<String, Object>() {{
 +      put("field1", "value1");
 +    }});
 +
 +    assertEquals(expectedSearchResult, solrSearchDao.getSearchResult(solrDocument,
-             Optional.of(Collections.singletonList("field1"))));
++            Collections.singletonList("field1")));
 +
 +    SearchResult expectedSearchResultAllFields = new SearchResult();
 +    expectedSearchResultAllFields.setId("guid");
 +    expectedSearchResultAllFields.setSource(new HashMap<String, Object>() {{
 +      put("field1", "value1");
 +      put("field2", "value2");
 +    }});
 +
-     assertEquals(expectedSearchResultAllFields, solrSearchDao.getSearchResult(solrDocument, Optional.empty()));
++    assertEquals(expectedSearchResultAllFields, solrSearchDao.getSearchResult(solrDocument, null));
 +  }
 +
 +  @Test
 +  public void getFacetCountsShouldProperlyReturnFacetCounts() throws Exception {
 +    QueryResponse queryResponse = mock(QueryResponse.class);
 +
 +    FacetField facetField1 = new FacetField("field1");
 +    facetField1.add("value1", 1);
 +    facetField1.add("value2", 2);
 +    FacetField facetField2 = new FacetField("field2");
 +    facetField2.add("value3", 3);
 +    facetField2.add("value4", 4);
 +    when(queryResponse.getFacetField("field1")).thenReturn(facetField1);
 +    when(queryResponse.getFacetField("field2")).thenReturn(facetField2);
 +
 +    Map<String, Map<String, Long>> expectedFacetCounts = new HashMap<String, Map<String, Long>>() {{
 +      put("field1", new HashMap<String, Long>() {{
 +        put("value1", 1L);
 +        put("value2", 2L);
 +      }});
 +      put("field2", new HashMap<String, Long>() {{
 +        put("value3", 3L);
 +        put("value4", 4L);
 +      }});
 +    }};
 +
 +    assertEquals(expectedFacetCounts, solrSearchDao.getFacetCounts(Arrays.asList("field1", "field2"), queryResponse));
 +  }
 +
 +  @Test
 +  public void buildGroupResponseShouldProperlyReturnGroupResponse() throws Exception {
 +    GroupRequest groupRequest = mock(GroupRequest.class);
 +    QueryResponse queryResponse = mock(QueryResponse.class);
 +    NamedList namedList = mock(NamedList.class);
 +    List pivotFields = mock(List.class);
 +    List groupResults = mock(List.class);
 +
 +    solrSearchDao = spy(new SolrSearchDao(client, accessConfig));
 +    Group group1 = new Group();
 +    group1.setField("field1");
 +    Group group2 = new Group();
 +    group2.setField("field2");
 +    when(groupRequest.getGroups()).thenReturn(Arrays.asList(group1, group2));
 +    when(queryResponse.getFacetPivot()).thenReturn(namedList);
 +    when(namedList.get("field1,field2")).thenReturn(pivotFields);
 +    doReturn(groupResults).when(solrSearchDao).getGroupResults(groupRequest, 0, pivotFields);
 +
 +    GroupResponse groupResponse = solrSearchDao.buildGroupResponse(groupRequest, queryResponse);
 +    assertEquals("field1", groupResponse.getGroupedBy());
 +    verify(namedList).get("field1,field2");
 +    verify(solrSearchDao).getGroupResults(groupRequest, 0, pivotFields);
 +
 +  }
 +
 +  @Test
 +  public void getGroupResultsShouldProperlyReturnGroupResults() throws Exception {
 +    GroupRequest groupRequest = new GroupRequest();
 +    Group group1 = new Group();
 +    group1.setField("field1");
 +    GroupOrder groupOrder1 = new GroupOrder();
 +    groupOrder1.setSortOrder("ASC");
 +    groupOrder1.setGroupOrderType("TERM");
 +    group1.setOrder(groupOrder1);
 +    Group group2 = new Group();
 +    group2.setField("field2");
 +    GroupOrder groupOrder2 = new GroupOrder();
 +    groupOrder2.setSortOrder("DESC");
 +    groupOrder2.setGroupOrderType("COUNT");
 +    group2.setOrder(groupOrder2);
 +    groupRequest.setGroups(Arrays.asList(group1, group2));
 +    groupRequest.setScoreField("scoreField");
 +
 +    PivotField level1Pivot1 = mock(PivotField.class);
 +    PivotField level1Pivot2 = mock(PivotField.class);
 +    PivotField level2Pivot1 = mock(PivotField.class);
 +    PivotField level2Pivot2 = mock(PivotField.class);
 +    FieldStatsInfo level1Pivot1FieldStatsInfo = mock(FieldStatsInfo.class);
 +    FieldStatsInfo level1Pivot2FieldStatsInfo = mock(FieldStatsInfo.class);
 +    FieldStatsInfo level2Pivot1FieldStatsInfo = mock(FieldStatsInfo.class);
 +    FieldStatsInfo level2Pivot2FieldStatsInfo = mock(FieldStatsInfo.class);
 +    List<PivotField> level1Pivots = Arrays.asList(level1Pivot1, level1Pivot2);
 +    List<PivotField> level2Pivots = Arrays.asList(level2Pivot1, level2Pivot2);
 +
 +    when(level1Pivot1.getValue()).thenReturn("field1value1");
 +    when(level1Pivot1.getCount()).thenReturn(1);
 +    when(level1Pivot1FieldStatsInfo.getSum()).thenReturn(1.0);
 +    when(level1Pivot1.getFieldStatsInfo()).thenReturn(new HashMap<String, FieldStatsInfo>(){{
 +      put("score", level1Pivot1FieldStatsInfo);
 +    }});
 +    when(level1Pivot2.getValue()).thenReturn("field1value2");
 +    when(level1Pivot2.getCount()).thenReturn(2);
 +    when(level1Pivot2FieldStatsInfo.getSum()).thenReturn(2.0);
 +    when(level1Pivot2.getFieldStatsInfo()).thenReturn(new HashMap<String, FieldStatsInfo>(){{
 +      put("score", level1Pivot2FieldStatsInfo);
 +    }});
 +    when(level2Pivot1.getValue()).thenReturn("field2value1");
 +    when(level2Pivot1.getCount()).thenReturn(3);
 +    when(level2Pivot1FieldStatsInfo.getSum()).thenReturn(3.0);
 +    when(level2Pivot1.getFieldStatsInfo()).thenReturn(new HashMap<String, FieldStatsInfo>(){{
 +      put("score", level2Pivot1FieldStatsInfo);
 +    }});
 +    when(level2Pivot2.getValue()).thenReturn("field2value2");
 +    when(level2Pivot2.getCount()).thenReturn(4);
 +    when(level2Pivot2FieldStatsInfo.getSum()).thenReturn(4.0);
 +    when(level2Pivot2.getFieldStatsInfo()).thenReturn(new HashMap<String, FieldStatsInfo>(){{
 +      put("score", level2Pivot2FieldStatsInfo);
 +    }});
 +    when(level1Pivot1.getPivot()).thenReturn(level2Pivots);
 +
 +    List<GroupResult> level1GroupResults = solrSearchDao.getGroupResults(groupRequest, 0, level1Pivots);
 +
 +    assertEquals("field1value1", level1GroupResults.get(0).getKey());
 +    assertEquals(1, level1GroupResults.get(0).getTotal());
 +    assertEquals(1.0, level1GroupResults.get(0).getScore(), 0.00001);
 +    assertEquals("field2", level1GroupResults.get(0).getGroupedBy());
 +    assertEquals("field1value2", level1GroupResults.get(1).getKey());
 +    assertEquals(2, level1GroupResults.get(1).getTotal());
 +    assertEquals(2.0, level1GroupResults.get(1).getScore(), 0.00001);
 +    assertEquals("field2", level1GroupResults.get(1).getGroupedBy());
 +    assertEquals(0, level1GroupResults.get(1).getGroupResults().size());
 +
 +    List<GroupResult> level2GroupResults = level1GroupResults.get(0).getGroupResults();
 +    assertEquals("field2value2", level2GroupResults.get(0).getKey());
 +    assertEquals(4, level2GroupResults.get(0).getTotal());
 +    assertEquals(4.0, level2GroupResults.get(0).getScore(), 0.00001);
 +    assertNull(level2GroupResults.get(0).getGroupedBy());
 +    assertNull(level2GroupResults.get(0).getGroupResults());
 +    assertEquals("field2value1", level2GroupResults.get(1).getKey());
 +    assertEquals(3, level2GroupResults.get(1).getTotal());
 +    assertEquals(3.0, level2GroupResults.get(1).getScore(), 0.00001);
 +    assertNull(level2GroupResults.get(1).getGroupedBy());
 +    assertNull(level2GroupResults.get(1).getGroupResults());
 +  }
 +
 +  @Test
 +  public void toDocumentShouldProperlyReturnDocument() throws Exception {
 +    SolrDocument solrDocument = new SolrDocument();
 +    solrDocument.addField(SolrDao.VERSION_FIELD, 1.0);
 +    solrDocument.addField(Constants.GUID, "guid");
 +    solrDocument.addField(Constants.SENSOR_TYPE, "bro");
 +    solrDocument.addField("field", "value");
 +
 +    Document expectedDocument = new Document(new HashMap<String, Object>(){{
 +      put("field", "value");
 +      put(Constants.GUID, "guid");
 +      put(Constants.SENSOR_TYPE, "bro");
 +    }}, "guid", "bro", 0L);
 +
 +    Document actualDocument = solrSearchDao.toDocument(solrDocument);
 +    assertEquals(expectedDocument, actualDocument);
 +  }
 +
 +}

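Note on the assertions above: in getGroupResultsShouldProperlyReturnGroupResults, the
second-level results arrive with "field2value2" ahead of "field2value1" because group2
sorts by COUNT descending (4 vs. 3), while the first-level results follow group1's
ascending TERM order; the leaf-level results carry null groupedBy and groupResults
because there is no deeper pivot to descend into.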
http://git-wip-us.apache.org/repos/asf/metron/blob/d0a4e4c0/pom.xml
----------------------------------------------------------------------


[08/50] [abbrv] metron git commit: METRON-1483: Create a tool to monitor performance of the topologies closes apache/incubator-metron#958

Posted by rm...@apache.org.
http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/sampler/UnbiasedSampler.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/sampler/UnbiasedSampler.java b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/sampler/UnbiasedSampler.java
new file mode 100644
index 0000000..5d5c240
--- /dev/null
+++ b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/sampler/UnbiasedSampler.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.sampler;
+
+import java.util.Random;
+
+public class UnbiasedSampler implements Sampler {
+
+  @Override
+  public int sample(Random rng, int limit) {
+    return rng.nextInt(limit);
+  }
+}

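UnbiasedSampler simply delegates to Random.nextInt, so every index below the limit is
equally likely. A minimal illustrative sketch of how a caller might pick message
templates uniformly (the example class and template list are hypothetical, not part of
this commit):

    import java.util.Arrays;
    import java.util.List;
    import java.util.Random;

    import org.apache.metron.performance.sampler.Sampler;
    import org.apache.metron.performance.sampler.UnbiasedSampler;

    public class UnbiasedSamplerExample {
      public static void main(String[] args) {
        List<String> templates = Arrays.asList("msg-a", "msg-b", "msg-c");
        Sampler sampler = new UnbiasedSampler();
        Random rng = new Random(0);
        // Each index in [0, templates.size()) is drawn with probability 1/3.
        for (int i = 0; i < 5; ++i) {
          System.out.println(templates.get(sampler.sample(rng, templates.size())));
        }
      }
    }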
http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/util/KafkaUtil.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/util/KafkaUtil.java b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/util/KafkaUtil.java
new file mode 100644
index 0000000..c13f236
--- /dev/null
+++ b/metron-contrib/metron-performance/src/main/java/org/apache/metron/performance/util/KafkaUtil.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.util;
+
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.PartitionInfo;
+import org.apache.kafka.common.TopicPartition;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public enum KafkaUtil {
+  INSTANCE;
+
+  public List<TopicPartition> getTopicPartition(KafkaConsumer<String, String> consumer, String topic) {
+
+    List<PartitionInfo> partitions = consumer.partitionsFor(topic);
+    List<TopicPartition> ret = new ArrayList<>(partitions.size());
+    for(PartitionInfo par : partitions) {
+      ret.add(new TopicPartition(topic, par.partition()));
+    }
+    return ret;
+  }
+
+  public Map<Integer, Long> getKafkaOffsetMap(KafkaConsumer<String, String> consumer, String topic ) {
+    Map<Integer, Long> ret = new HashMap<>();
+    if(!consumer.subscription().contains(topic)) {
+      consumer.subscribe(Collections.singletonList(topic));
+    }
+    consumer.poll(0);
+    List<TopicPartition> partitions = getTopicPartition(consumer, topic);
+    consumer.seekToEnd(partitions);
+    for(TopicPartition par : partitions) {
+      ret.put(par.partition(), consumer.position(par)-1);
+    }
+    return ret;
+  }
+}

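getKafkaOffsetMap subscribes to the topic if necessary, polls once to force partition
assignment, seeks to the end of every partition, and records position - 1 for each. A
hedged sketch of how a monitoring caller might diff two snapshots to count messages
written in between (the broker address, group id, topic name, and sleep are
assumptions, not part of this commit):

    import java.util.Map;
    import java.util.Properties;

    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.metron.performance.util.KafkaUtil;

    public class OffsetDeltaExample {
      public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:6667"); // hypothetical broker
        props.put("group.id", "offset-probe");            // hypothetical group id
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
          Map<Integer, Long> before = KafkaUtil.INSTANCE.getKafkaOffsetMap(consumer, "enrichments");
          Thread.sleep(10000); // let the topology process for a while
          Map<Integer, Long> after = KafkaUtil.INSTANCE.getKafkaOffsetMap(consumer, "enrichments");
          long written = 0;
          for (Map.Entry<Integer, Long> kv : after.entrySet()) {
            // Partitions absent from the first snapshot contribute a delta of zero.
            written += kv.getValue() - before.getOrDefault(kv.getKey(), kv.getValue());
          }
          System.out.println("messages written: " + written);
        }
      }
    }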
http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/main/scripts/load_tool.sh
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/main/scripts/load_tool.sh b/metron-contrib/metron-performance/src/main/scripts/load_tool.sh
new file mode 100755
index 0000000..1b56a6e
--- /dev/null
+++ b/metron-contrib/metron-performance/src/main/scripts/load_tool.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+BIGTOP_DEFAULTS_DIR=${BIGTOP_DEFAULTS_DIR-/etc/default}
+[ -n "${BIGTOP_DEFAULTS_DIR}" -a -r ${BIGTOP_DEFAULTS_DIR}/hbase ] && . ${BIGTOP_DEFAULTS_DIR}/hbase
+
+# Autodetect JAVA_HOME if not defined
+if [ -e /usr/libexec/bigtop-detect-javahome ]; then
+  . /usr/libexec/bigtop-detect-javahome
+elif [ -e /usr/lib/bigtop-utils/bigtop-detect-javahome ]; then
+  . /usr/lib/bigtop-utils/bigtop-detect-javahome
+fi
+
+export METRON_VERSION=${project.version}
+export METRON_HOME=/usr/metron/$METRON_VERSION
+export CLASSNAME="org.apache.metron.performance.load.LoadGenerator"
+export GEN_JAR=${project.artifactId}-$METRON_VERSION.jar
+export PARSERS_JAR=metron-parsers-$METRON_VERSION-uber.jar
+
+java -cp $METRON_HOME/lib/$GEN_JAR:$METRON_HOME/lib/$PARSERS_JAR $CLASSNAME "$@"

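The ${project.version} and ${project.artifactId} placeholders in this wrapper are
presumably substituted by Maven resource filtering at build time, as with the other
Metron start scripts (compare start_enrichment_topology.sh below); at runtime the
script only assembles a classpath from the generator and parsers uber jars and
forwards all of its arguments to LoadGenerator.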
http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/test/java/org/apache/metron/performance/load/LoadOptionsTest.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/test/java/org/apache/metron/performance/load/LoadOptionsTest.java b/metron-contrib/metron-performance/src/test/java/org/apache/metron/performance/load/LoadOptionsTest.java
new file mode 100644
index 0000000..0d9b34a
--- /dev/null
+++ b/metron-contrib/metron-performance/src/test/java/org/apache/metron/performance/load/LoadOptionsTest.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.load;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.PosixParser;
+import org.apache.commons.io.IOUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
+import java.util.EnumMap;
+import java.util.List;
+import java.util.Optional;
+
+public class LoadOptionsTest {
+  @Test
+  public void testHappyPath() throws Exception {
+    CommandLine cli = LoadOptions.parse(new PosixParser(), new String[] { "-eps", "1000", "-ot","foo"});
+    EnumMap<LoadOptions, Optional<Object>> results = LoadOptions.createConfig(cli);
+    Assert.assertEquals(1000L, results.get(LoadOptions.EPS).get());
+    Assert.assertEquals("foo", results.get(LoadOptions.OUTPUT_TOPIC).get());
+    Assert.assertEquals(LoadGenerator.CONSUMER_GROUP, results.get(LoadOptions.CONSUMER_GROUP).get());
+    Assert.assertEquals(Runtime.getRuntime().availableProcessors(), results.get(LoadOptions.NUM_THREADS).get());
+    Assert.assertFalse(results.get(LoadOptions.BIASED_SAMPLE).isPresent());
+    Assert.assertFalse(results.get(LoadOptions.CSV).isPresent());
+  }
+
+  @Test
+  public void testCsvPresent() throws Exception {
+      CommandLine cli = LoadOptions.parse(new PosixParser(), new String[]{"-c", "/tmp/blah"});
+      EnumMap<LoadOptions, Optional<Object>> results = LoadOptions.createConfig(cli);
+      Assert.assertEquals(new File("/tmp/blah"), results.get(LoadOptions.CSV).get());
+  }
+
+  @Test
+  public void testCsvMissing() throws Exception {
+      CommandLine cli = LoadOptions.parse(new PosixParser(), new String[]{});
+      EnumMap<LoadOptions, Optional<Object>> results = LoadOptions.createConfig(cli);
+      Assert.assertFalse(results.get(LoadOptions.CSV).isPresent());
+  }
+
+  @Test
+  public void testThreadsByCores() throws Exception {
+      CommandLine cli = LoadOptions.parse(new PosixParser(), new String[]{"-p", "2C"});
+      EnumMap<LoadOptions, Optional<Object>> results = LoadOptions.createConfig(cli);
+      Assert.assertEquals(2 * Runtime.getRuntime().availableProcessors(), results.get(LoadOptions.NUM_THREADS).get());
+  }
+
+  @Test
+  public void testThreadsByNum() throws Exception {
+      CommandLine cli = LoadOptions.parse(new PosixParser(), new String[]{"-p", "5"});
+      EnumMap<LoadOptions, Optional<Object>> results = LoadOptions.createConfig(cli);
+      Assert.assertEquals(5, results.get(LoadOptions.NUM_THREADS).get());
+  }
+
+  @Test
+  public void testTemplatePresent() throws Exception {
+    File templateFile = new File("target/template");
+    String template = "test template1";
+    try(BufferedWriter w = new BufferedWriter(new FileWriter(templateFile))) {
+      IOUtils.write(template, w);
+    }
+    templateFile.deleteOnExit();
+    CommandLine cli = LoadOptions.parse(new PosixParser(), new String[]{"-t", templateFile.getPath()});
+    EnumMap<LoadOptions, Optional<Object>> results = LoadOptions.createConfig(cli);
+    List<String> templates = (List<String>) results.get(LoadOptions.TEMPLATE).get();
+    Assert.assertEquals(1, templates.size());
+    Assert.assertEquals(template, templates.get(0));
+  }
+
+  @Test(expected=IllegalStateException.class)
+  public void testTemplateMissing() throws Exception {
+    LoadOptions.createConfig(LoadOptions.parse(new PosixParser(), new String[]{"-t", "target/template2"}));
+  }
+}

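Taken together, these tests document the load generator's CLI: -eps sets events per
second, -ot names the output topic, -c points at a CSV file, -p sets the thread count
(either an absolute number such as 5 or a multiple of cores such as 2C), and -t names
a template file that must exist. So an invocation along the lines of
load_tool.sh -eps 1000 -ot enrichments -p 2C -t /path/to/template (paths hypothetical)
exercises the same happy path verified above.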
http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/test/java/org/apache/metron/performance/load/SendToKafkaTest.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/test/java/org/apache/metron/performance/load/SendToKafkaTest.java b/metron-contrib/metron-performance/src/test/java/org/apache/metron/performance/load/SendToKafkaTest.java
new file mode 100644
index 0000000..c652291
--- /dev/null
+++ b/metron-contrib/metron-performance/src/test/java/org/apache/metron/performance/load/SendToKafkaTest.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.load;
+
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class SendToKafkaTest {
+
+  @Test
+  public void testWritesCorrectNumber() {
+    ExecutorService executor = ForkJoinPool.commonPool();
+    AtomicLong numSent = new AtomicLong(0);
+    long expectedSent = 100;
+    SendToKafka sender = new SendToKafka(null, expectedSent, 10, () -> "msg", executor, numSent, ThreadLocal.withInitial(() -> null) ) {
+      @Override
+      protected Future<?> sendToKafka(KafkaProducer producer, String kafkaTopic, String message) {
+        Assert.assertEquals("msg", message);
+        return ForkJoinPool.commonPool().submit(() -> {
+          numSent.incrementAndGet();
+        });
+      }
+    };
+    sender.run();
+    Assert.assertEquals(expectedSent, numSent.get());
+  }
+
+}

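This test uses the protected sendToKafka method as a test seam: the override counts
sends on the common ForkJoinPool instead of contacting a broker, so SendToKafka.run()
can be verified to issue exactly the expected number of writes even with a null
producer.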
http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/metron-performance/src/test/java/org/apache/metron/performance/sampler/SamplerTest.java
----------------------------------------------------------------------
diff --git a/metron-contrib/metron-performance/src/test/java/org/apache/metron/performance/sampler/SamplerTest.java b/metron-contrib/metron-performance/src/test/java/org/apache/metron/performance/sampler/SamplerTest.java
new file mode 100644
index 0000000..d386e00
--- /dev/null
+++ b/metron-contrib/metron-performance/src/test/java/org/apache/metron/performance/sampler/SamplerTest.java
@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.performance.sampler;
+
+import com.google.common.collect.ImmutableList;
+import org.adrianwalker.multilinestring.Multiline;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.StringReader;
+import java.util.AbstractMap;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+
+public class SamplerTest {
+  private static final int SIMULATION_SIZE = 10000;
+  private void testSampler(Sampler sampler, Map<Integer, Double> expectedProbs) {
+    Random rng = new Random(0);
+    Map<Integer, Double> empiricalProbs = new HashMap<>();
+    for(int i = 0;i < SIMULATION_SIZE;++i) {
+      int sample = sampler.sample(rng, 10);
+      Double cnt = empiricalProbs.get(sample);
+      empiricalProbs.put(sample, ((cnt == null)?0:cnt) + 1);
+    }
+    for(Map.Entry<Integer, Double> kv : empiricalProbs.entrySet()) {
+      double empiricalProb = kv.getValue()/SIMULATION_SIZE;
+      String msg = expectedProbs.get(kv.getKey()) + " != " + empiricalProb;
+      Assert.assertEquals(msg, expectedProbs.get(kv.getKey()), empiricalProb, 1e-2);
+    }
+  }
+
+  @Test
+  public void testUnbiasedSampler() {
+    Sampler sampler = new UnbiasedSampler();
+    testSampler(sampler, new HashMap<Integer, Double>() {{
+      for(int i = 0;i < 10;++i) {
+        put(i, 0.1);
+      }
+    }});
+  }
+
+  @Test
+  public void testBiasedSampler() {
+    Sampler sampler = new BiasedSampler(
+            new ArrayList<Map.Entry<Integer, Integer>>() {{
+              add(new AbstractMap.SimpleEntry<>(30, 80));
+              add(new AbstractMap.SimpleEntry<>(70, 20));
+            }}
+            , 10
+            );
+    testSampler(sampler, new HashMap<Integer, Double>() {{
+      for(int i = 0;i < 3;++i) {
+        put(i, 0.8/3);
+      }
+      for(int i = 3;i < 10;++i) {
+        put(i, 0.2/7);
+      }
+    }});
+  }
+
+  /**
+   80,20
+   */
+  @Multiline
+  static String paretoConfigImplicit;
+
+  /**
+   80,20
+   20,80
+   */
+  @Multiline
+  static String paretoConfig;
+
+  @Test
+  public void testDistributionRead() throws IOException {
+    for(String config : ImmutableList.of(paretoConfig, paretoConfigImplicit)) {
+      List<Map.Entry<Integer, Integer>> endpoints = BiasedSampler.readDistribution(new BufferedReader(new StringReader(config)), true);
+      Assert.assertEquals(2, endpoints.size());
+      Assert.assertEquals(new AbstractMap.SimpleEntry<>(80,20), endpoints.get(0));
+      Assert.assertEquals(new AbstractMap.SimpleEntry<>(20,80), endpoints.get(1));
+    }
+  }
+
+  /**
+   80,20
+   10,70
+   10,10
+   */
+  @Multiline
+  static String longerConfig;
+  /**
+   80,20
+   10,70
+   */
+  @Multiline
+  static String longerConfigImplicit;
+
+  @Test
+  public void testDistributionReadLonger() throws IOException {
+    for(String config : ImmutableList.of(longerConfig, longerConfigImplicit)) {
+      List<Map.Entry<Integer, Integer>> endpoints = BiasedSampler.readDistribution(new BufferedReader(new StringReader(config)), true);
+      Assert.assertEquals(3, endpoints.size());
+      Assert.assertEquals(new AbstractMap.SimpleEntry<>(80,20), endpoints.get(0));
+      Assert.assertEquals(new AbstractMap.SimpleEntry<>(10,70), endpoints.get(1));
+      Assert.assertEquals(new AbstractMap.SimpleEntry<>(10,10), endpoints.get(2));
+    }
+  }
+
+  @Test(expected=IllegalArgumentException.class)
+  public void testDistributionRead_garbage() throws IOException {
+    BiasedSampler.readDistribution(new BufferedReader(new StringReader("blah foo")), true);
+  }
+
+  @Test(expected=IllegalArgumentException.class)
+  public void testDistributionRead_negative() throws IOException {
+    BiasedSampler.readDistribution(new BufferedReader(new StringReader("80,-20")), true);
+  }
+
+  @Test(expected=IllegalArgumentException.class)
+  public void testDistributionRead_over100() throws IOException {
+    BiasedSampler.readDistribution(new BufferedReader(new StringReader("200,20")), true);
+  }
+}

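The testBiasedSampler expectations imply that each entry in the BiasedSampler
constructor's list reads as (percent of the domain, percent of the probability mass),
so (30, 80) concentrates 80% of samples on the first 30% of indices. A sketch under
that reading (the simulation harness is illustrative, not part of this commit):

    import java.util.AbstractMap;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.Random;

    import org.apache.metron.performance.sampler.BiasedSampler;
    import org.apache.metron.performance.sampler.Sampler;

    public class BiasedSamplerExample {
      public static void main(String[] args) {
        // First 30% of the domain gets 80% of the mass; the remaining 70% gets 20%.
        List<Map.Entry<Integer, Integer>> distribution = new ArrayList<>();
        distribution.add(new AbstractMap.SimpleEntry<>(30, 80));
        distribution.add(new AbstractMap.SimpleEntry<>(70, 20));
        Sampler sampler = new BiasedSampler(distribution, 10);

        Random rng = new Random(0);
        int[] counts = new int[10];
        for (int i = 0; i < 10000; ++i) {
          counts[sampler.sample(rng, 10)]++;
        }
        // Expect roughly 8000 samples across indices 0-2 and 2000 across indices 3-9.
        for (int i = 0; i < counts.length; ++i) {
          System.out.println(i + ": " + counts[i]);
        }
      }
    }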
http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-contrib/pom.xml
----------------------------------------------------------------------
diff --git a/metron-contrib/pom.xml b/metron-contrib/pom.xml
index cf28fac..bab7507 100644
--- a/metron-contrib/pom.xml
+++ b/metron-contrib/pom.xml
@@ -40,5 +40,20 @@
     </licenses>
     <modules>
         <module>metron-docker</module>
+        <module>metron-performance</module>
     </modules>
+    <dependencies>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>${global_junit_version}</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.adrianwalker</groupId>
+            <artifactId>multiline-string</artifactId>
+            <version>0.1.2</version>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
 </project>

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/metainfo.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/metainfo.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/metainfo.xml
index 7a680a4..6a20af8 100644
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/metainfo.xml
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/metainfo.xml
@@ -420,6 +420,10 @@
             <package>
               <name>metron-maas-service</name>
             </package>
+            <package>
+              <name>metron-performance</name>
+            </package>
+
           </packages>
         </osSpecific>
         <osSpecific>

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-deployment/packaging/docker/deb-docker/pom.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/docker/deb-docker/pom.xml b/metron-deployment/packaging/docker/deb-docker/pom.xml
index 7fbe47b..cba2f98 100644
--- a/metron-deployment/packaging/docker/deb-docker/pom.xml
+++ b/metron-deployment/packaging/docker/deb-docker/pom.xml
@@ -155,6 +155,12 @@
                                         <include>*.tar.gz</include>
                                     </includes>
                                 </resource>
+                                <resource>
+                                    <directory>${metron_dir}/metron-contrib/metron-performance/target/</directory>
+                                    <includes>
+                                        <include>*.tar.gz</include>
+                                    </includes>
+                                </resource>
                             </resources>
                         </configuration>
                     </execution>

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec b/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec
index cc01d7c..6b35dae 100644
--- a/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec
+++ b/metron-deployment/packaging/docker/rpm-docker/SPECS/metron.spec
@@ -57,6 +57,7 @@ Source10:       metron-config-%{full_version}-archive.tar.gz
 Source11:       metron-management-%{full_version}-archive.tar.gz
 Source12:       metron-maas-service-%{full_version}-archive.tar.gz
 Source13:       metron-alerts-%{full_version}-archive.tar.gz
+Source14:       metron-performance-%{full_version}-archive.tar.gz
 
 %description
 Apache Metron provides a scalable advanced security analytics framework
@@ -93,6 +94,7 @@ tar -xzf %{SOURCE10} -C %{buildroot}%{metron_home}
 tar -xzf %{SOURCE11} -C %{buildroot}%{metron_home}
 tar -xzf %{SOURCE12} -C %{buildroot}%{metron_home}
 tar -xzf %{SOURCE13} -C %{buildroot}%{metron_home}
+tar -xzf %{SOURCE14} -C %{buildroot}%{metron_home}
 
 install %{buildroot}%{metron_home}/bin/metron-management-ui %{buildroot}/etc/init.d/
 install %{buildroot}%{metron_home}/bin/metron-alerts-ui %{buildroot}/etc/init.d/
@@ -185,6 +187,25 @@ This package installs the Metron Elasticsearch files
 
 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
+%package        performance
+Summary:        Metron Performance Tools
+Group:          Applications/Internet
+Provides:       performance = %{version}
+
+%description    performance
+This package installs performance tools useful for Metron
+
+%files          performance
+%defattr(-,root,root,755)
+%dir %{metron_root}
+%dir %{metron_home}
+%dir %{metron_home}/bin
+%dir %{metron_home}/lib
+%{metron_home}/bin/load_tool.sh
+%attr(0644,root,root) %{metron_home}/lib/metron-performance-%{full_version}.jar
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
 %package        data-management
 Summary:        Metron Data Management Files
 Group:          Applications/Internet

http://git-wip-us.apache.org/repos/asf/metron/blob/46ad9d93/metron-deployment/packaging/docker/rpm-docker/pom.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/docker/rpm-docker/pom.xml b/metron-deployment/packaging/docker/rpm-docker/pom.xml
index 4ed2edd..ef1716c 100644
--- a/metron-deployment/packaging/docker/rpm-docker/pom.xml
+++ b/metron-deployment/packaging/docker/rpm-docker/pom.xml
@@ -191,6 +191,12 @@
                                         <include>*.tar.gz</include>
                                     </includes>
                                 </resource>
+                                <resource>
+                                    <directory>${metron_dir}/metron-contrib/metron-performance/target/</directory>
+                                    <includes>
+                                        <include>*.tar.gz</include>
+                                    </includes>
+                                </resource>
                             </resources>
                         </configuration>
                     </execution>


[39/50] [abbrv] metron git commit: METRON-1499 Enable Configuration of Unified Enrichment Topology via Ambari (nickwallen) closes apache/metron#984

Posted by rm...@apache.org.
http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-platform/metron-enrichment/src/main/flux/enrichment/remote.yaml
----------------------------------------------------------------------
diff --git a/metron-platform/metron-enrichment/src/main/flux/enrichment/remote.yaml b/metron-platform/metron-enrichment/src/main/flux/enrichment/remote.yaml
deleted file mode 100644
index fd7ceff..0000000
--- a/metron-platform/metron-enrichment/src/main/flux/enrichment/remote.yaml
+++ /dev/null
@@ -1,590 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-name: "enrichment"
-config:
-    topology.workers: ${enrichment.workers}
-    topology.acker.executors: ${enrichment.acker.executors}
-    topology.worker.childopts: ${topology.worker.childopts}
-    topology.auto-credentials: ${topology.auto-credentials}
-    topology.max.spout.pending: ${topology.max.spout.pending}
-
-components:
-
-# Enrichment
-    -   id: "stellarEnrichmentAdapter"
-        className: "org.apache.metron.enrichment.adapters.stellar.StellarAdapter"
-        configMethods:
-            -   name: "ofType"
-                args:
-                    - "ENRICHMENT"
-
-    # Any kafka props for the producer go here.
-    -   id: "kafkaWriterProps"
-        className: "java.util.HashMap"
-        configMethods:
-          -   name: "put"
-              args:
-                  - "security.protocol"
-                  - "${kafka.security.protocol}"
-
-    -   id: "stellarEnrichment"
-        className: "org.apache.metron.enrichment.configuration.Enrichment"
-        constructorArgs:
-            -   "stellar"
-            -   ref: "stellarEnrichmentAdapter"
-
-    -   id: "geoEnrichmentAdapter"
-        className: "org.apache.metron.enrichment.adapters.geo.GeoAdapter"
-    -   id: "geoEnrichment"
-        className: "org.apache.metron.enrichment.configuration.Enrichment"
-        constructorArgs:
-            -   "geo"
-            -   ref: "geoEnrichmentAdapter"
-    -   id: "hostEnrichmentAdapter"
-        className: "org.apache.metron.enrichment.adapters.host.HostFromJSONListAdapter"
-        constructorArgs:
-            - '${enrichment.host.known_hosts}'
-    -   id: "hostEnrichment"
-        className: "org.apache.metron.enrichment.configuration.Enrichment"
-        constructorArgs:
-            -   "host"
-            -   ref: "hostEnrichmentAdapter"
-
-    -   id: "simpleHBaseEnrichmentConfig"
-        className: "org.apache.metron.enrichment.adapters.simplehbase.SimpleHBaseConfig"
-        configMethods:
-            -   name: "withProviderImpl"
-                args:
-                    - "${hbase.provider.impl}"
-            -   name: "withHBaseTable"
-                args:
-                    - "${enrichment.simple.hbase.table}"
-            -   name: "withHBaseCF"
-                args:
-                    - "${enrichment.simple.hbase.cf}"
-    -   id: "simpleHBaseEnrichmentAdapter"
-        className: "org.apache.metron.enrichment.adapters.simplehbase.SimpleHBaseAdapter"
-        configMethods:
-           -    name: "withConfig"
-                args:
-                    - ref: "simpleHBaseEnrichmentConfig"
-    -   id: "simpleHBaseEnrichment"
-        className: "org.apache.metron.enrichment.configuration.Enrichment"
-        constructorArgs:
-          -   "hbaseEnrichment"
-          -   ref: "simpleHBaseEnrichmentAdapter"
-    -   id: "enrichments"
-        className: "java.util.ArrayList"
-        configMethods:
-            -   name: "add"
-                args:
-                    - ref: "geoEnrichment"
-            -   name: "add"
-                args:
-                    - ref: "hostEnrichment"
-            -   name: "add"
-                args:
-                    - ref: "simpleHBaseEnrichment"
-            -   name: "add"
-                args:
-                    - ref: "stellarEnrichment"
-
-    #enrichment error
-    -   id: "enrichmentErrorKafkaWriter"
-        className: "org.apache.metron.writer.kafka.KafkaWriter"
-        configMethods:
-            -   name: "withTopic"
-                args:
-                    - "${enrichment.error.topic}"
-            -   name: "withZkQuorum"
-                args:
-                    - "${kafka.zk}"
-            -   name: "withProducerConfigs"
-                args: 
-                    - ref: "kafkaWriterProps"
-
-# Threat Intel
-    -   id: "stellarThreatIntelAdapter"
-        className: "org.apache.metron.enrichment.adapters.stellar.StellarAdapter"
-        configMethods:
-            -   name: "ofType"
-                args:
-                    - "THREAT_INTEL"
-    -   id: "stellarThreatIntelEnrichment"
-        className: "org.apache.metron.enrichment.configuration.Enrichment"
-        constructorArgs:
-            -   "stellar"
-            -   ref: "stellarThreatIntelAdapter"
-    -   id: "simpleHBaseThreatIntelConfig"
-        className: "org.apache.metron.enrichment.adapters.threatintel.ThreatIntelConfig"
-        configMethods:
-            -   name: "withProviderImpl"
-                args:
-                    - "${hbase.provider.impl}"
-            -   name: "withTrackerHBaseTable"
-                args:
-                    - "${threat.intel.tracker.table}"
-            -   name: "withTrackerHBaseCF"
-                args:
-                    - "${threat.intel.tracker.cf}"
-            -   name: "withHBaseTable"
-                args:
-                    - "${threat.intel.simple.hbase.table}"
-            -   name: "withHBaseCF"
-                args:
-                    - "${threat.intel.simple.hbase.cf}"
-    -   id: "simpleHBaseThreatIntelAdapter"
-        className: "org.apache.metron.enrichment.adapters.threatintel.ThreatIntelAdapter"
-        configMethods:
-           -    name: "withConfig"
-                args:
-                    - ref: "simpleHBaseThreatIntelConfig"
-    -   id: "simpleHBaseThreatIntelEnrichment"
-        className: "org.apache.metron.enrichment.configuration.Enrichment"
-        constructorArgs:
-          -   "hbaseThreatIntel"
-          -   ref: "simpleHBaseThreatIntelAdapter"
-
-    -   id: "threatIntels"
-        className: "java.util.ArrayList"
-        configMethods:
-            -   name: "add"
-                args:
-                    - ref: "simpleHBaseThreatIntelEnrichment"
-            -   name: "add"
-                args:
-                    - ref: "stellarThreatIntelEnrichment"
-
-    #threatintel error
-    -   id: "threatIntelErrorKafkaWriter"
-        className: "org.apache.metron.writer.kafka.KafkaWriter"
-        configMethods:
-            -   name: "withTopic"
-                args:
-                    - "${threat.intel.error.topic}"
-            -   name: "withZkQuorum"
-                args:
-                    - "${kafka.zk}"
-            -   name: "withProducerConfigs"
-                args: 
-                    - ref: "kafkaWriterProps"
-#indexing
-    -   id: "kafkaWriter"
-        className: "org.apache.metron.writer.kafka.KafkaWriter"
-        configMethods:
-            -   name: "withTopic"
-                args:
-                    - "${enrichment.output.topic}"
-            -   name: "withZkQuorum"
-                args:
-                    - "${kafka.zk}"
-            -   name: "withProducerConfigs"
-                args: 
-                    - ref: "kafkaWriterProps"
-
-#kafka/zookeeper
-    # Any kafka props for the consumer go here.
-    -   id: "kafkaProps"
-        className: "java.util.HashMap"
-        configMethods:
-          -   name: "put"
-              args:
-                  - "value.deserializer"
-                  - "org.apache.kafka.common.serialization.ByteArrayDeserializer"
-          -   name: "put"
-              args:
-                  - "key.deserializer"
-                  - "org.apache.kafka.common.serialization.ByteArrayDeserializer"
-          -   name: "put"
-              args:
-                  - "group.id"
-                  - "enrichments"
-          -   name: "put"
-              args:
-                  - "security.protocol"
-                  - "${kafka.security.protocol}"
-
-
-  # The fields to pull out of the kafka messages
-    -   id: "fields"
-        className: "java.util.ArrayList"
-        configMethods:
-          -   name: "add"
-              args:
-                  - "value"
-
-    -   id: "kafkaConfig"
-        className: "org.apache.metron.storm.kafka.flux.SimpleStormKafkaBuilder"
-        constructorArgs:
-          - ref: "kafkaProps"
-          # topic name
-          - "${enrichment.input.topic}"
-          - "${kafka.zk}"
-          - ref: "fields"
-        configMethods:
-            -   name: "setFirstPollOffsetStrategy"
-                args:
-                    - "${kafka.start}"
-
-
-spouts:
-    -   id: "kafkaSpout"
-        className: "org.apache.metron.storm.kafka.flux.StormKafkaSpout"
-        constructorArgs:
-            - ref: "kafkaConfig"
-        parallelism: ${kafka.spout.parallelism}
-
-bolts:
-# Enrichment Bolts
-    -   id: "enrichmentSplitBolt"
-        className: "org.apache.metron.enrichment.bolt.EnrichmentSplitterBolt"
-        constructorArgs:
-            - "${kafka.zk}"
-        configMethods:
-            -   name: "withEnrichments"
-                args:
-                    - ref: "enrichments"
-        parallelism: ${enrichment.split.parallelism}
-
-    -   id: "geoEnrichmentBolt"
-        className: "org.apache.metron.enrichment.bolt.GenericEnrichmentBolt"
-        constructorArgs:
-            - "${kafka.zk}"
-        configMethods:
-            -   name: "withEnrichment"
-                args:
-                    - ref: "geoEnrichment"
-            -   name: "withMaxCacheSize"
-                args: [10000]
-            -   name: "withMaxTimeRetain"
-                args: [10]
-
-    -   id: "stellarEnrichmentBolt"
-        className: "org.apache.metron.enrichment.bolt.GenericEnrichmentBolt"
-        constructorArgs:
-            - "${kafka.zk}"
-        configMethods:
-            -   name: "withEnrichment"
-                args:
-                    - ref: "stellarEnrichment"
-            -   name: "withMaxCacheSize"
-                args: [10000]
-            -   name: "withMaxTimeRetain"
-                args: [10]
-        parallelism: ${enrichment.stellar.parallelism}
-
-    -   id: "hostEnrichmentBolt"
-        className: "org.apache.metron.enrichment.bolt.GenericEnrichmentBolt"
-        constructorArgs:
-            - "${kafka.zk}"
-        configMethods:
-            -   name: "withEnrichment"
-                args:
-                    - ref: "hostEnrichment"
-            -   name: "withMaxCacheSize"
-                args: [10000]
-            -   name: "withMaxTimeRetain"
-                args: [10]
-
-    -   id: "simpleHBaseEnrichmentBolt"
-        className: "org.apache.metron.enrichment.bolt.GenericEnrichmentBolt"
-        constructorArgs:
-            - "${kafka.zk}"
-        configMethods:
-            -   name: "withEnrichment"
-                args:
-                    - ref: "simpleHBaseEnrichment"
-            -   name: "withMaxCacheSize"
-                args: [10000]
-            -   name: "withMaxTimeRetain"
-                args: [10]
-
-    -   id: "enrichmentJoinBolt"
-        className: "org.apache.metron.enrichment.bolt.EnrichmentJoinBolt"
-        constructorArgs:
-            - "${kafka.zk}"
-        configMethods:
-            -   name: "withMaxCacheSize"
-                args: [${enrichment.join.cache.size}]
-            -   name: "withMaxTimeRetain"
-                args: [10]
-        parallelism: ${enrichment.join.parallelism}
-
-    -   id: "enrichmentErrorOutputBolt"
-        className: "org.apache.metron.writer.bolt.BulkMessageWriterBolt"
-        constructorArgs:
-            - "${kafka.zk}"
-        configMethods:
-            -   name: "withMessageWriter"
-                args:
-                    - ref: "enrichmentErrorKafkaWriter"
-
-
-# Threat Intel Bolts
-    -   id: "threatIntelSplitBolt"
-        className: "org.apache.metron.enrichment.bolt.ThreatIntelSplitterBolt"
-        constructorArgs:
-            - "${kafka.zk}"
-        configMethods:
-            -   name: "withEnrichments"
-                args:
-                    - ref: "threatIntels"
-            -   name: "withMessageFieldName"
-                args: ["message"]
-        parallelism: ${threat.intel.split.parallelism}
-
-    -   id: "simpleHBaseThreatIntelBolt"
-        className: "org.apache.metron.enrichment.bolt.GenericEnrichmentBolt"
-        constructorArgs:
-            - "${kafka.zk}"
-        configMethods:
-            -   name: "withEnrichment"
-                args:
-                    - ref: "simpleHBaseThreatIntelEnrichment"
-            -   name: "withMaxCacheSize"
-                args: [10000]
-            -   name: "withMaxTimeRetain"
-                args: [10]
-    -   id: "stellarThreatIntelBolt"
-        className: "org.apache.metron.enrichment.bolt.GenericEnrichmentBolt"
-        constructorArgs:
-            - "${kafka.zk}"
-        configMethods:
-            -   name: "withEnrichment"
-                args:
-                    - ref: "stellarThreatIntelEnrichment"
-            -   name: "withMaxCacheSize"
-                args: [10000]
-            -   name: "withMaxTimeRetain"
-                args: [10]
-        parallelism: ${threat.intel.stellar.parallelism}
-
-    -   id: "threatIntelJoinBolt"
-        className: "org.apache.metron.enrichment.bolt.ThreatIntelJoinBolt"
-        constructorArgs:
-            - "${kafka.zk}"
-        configMethods:
-            -   name: "withMaxCacheSize"
-                args: [${threat.intel.join.cache.size}]
-            -   name: "withMaxTimeRetain"
-                args: [10]
-        parallelism: ${threat.intel.join.parallelism}
-
-    -   id: "threatIntelErrorOutputBolt"
-        className: "org.apache.metron.writer.bolt.BulkMessageWriterBolt"
-        constructorArgs:
-            - "${kafka.zk}"
-        configMethods:
-            -   name: "withMessageWriter"
-                args:
-                    - ref: "threatIntelErrorKafkaWriter"
-
-# Indexing Bolts
-    -   id: "outputBolt"
-        className: "org.apache.metron.writer.bolt.BulkMessageWriterBolt"
-        constructorArgs:
-            - "${kafka.zk}"
-        configMethods:
-            -   name: "withMessageWriter"
-                args:
-                    - ref: "kafkaWriter"
-        parallelism: ${kafka.writer.parallelism}
-
-
-streams:
-#parser
-    -   name: "spout -> enrichmentSplit"
-        from: "kafkaSpout"
-        to: "enrichmentSplitBolt"
-        grouping:
-            type: LOCAL_OR_SHUFFLE
-
-#enrichment
-    -   name: "enrichmentSplit -> host"
-        from: "enrichmentSplitBolt"
-        to: "hostEnrichmentBolt"
-        grouping:
-            streamId: "host"
-            type: FIELDS
-            args: ["message"]
-
-    -   name: "enrichmentSplit -> geo"
-        from: "enrichmentSplitBolt"
-        to: "geoEnrichmentBolt"
-        grouping:
-            streamId: "geo"
-            type: FIELDS
-            args: ["message"]
-
-    -   name: "enrichmentSplit -> stellar"
-        from: "enrichmentSplitBolt"
-        to: "stellarEnrichmentBolt"
-        grouping:
-            streamId: "stellar"
-            type: FIELDS
-            args: ["message"]
-
-
-    -   name: "enrichmentSplit -> simpleHBaseEnrichmentBolt"
-        from: "enrichmentSplitBolt"
-        to: "simpleHBaseEnrichmentBolt"
-        grouping:
-            streamId: "hbaseEnrichment"
-            type: FIELDS
-            args: ["message"]
-
-    -   name: "splitter -> join"
-        from: "enrichmentSplitBolt"
-        to: "enrichmentJoinBolt"
-        grouping:
-            streamId: "message"
-            type: FIELDS
-            args: ["key"]
-
-    -   name: "geo -> join"
-        from: "geoEnrichmentBolt"
-        to: "enrichmentJoinBolt"
-        grouping:
-            streamId: "geo"
-            type: FIELDS
-            args: ["key"]
-
-    -   name: "stellar -> join"
-        from: "stellarEnrichmentBolt"
-        to: "enrichmentJoinBolt"
-        grouping:
-            streamId: "stellar"
-            type: FIELDS
-            args: ["key"]
-
-    -   name: "simpleHBaseEnrichmentBolt -> join"
-        from: "simpleHBaseEnrichmentBolt"
-        to: "enrichmentJoinBolt"
-        grouping:
-            streamId: "hbaseEnrichment"
-            type: FIELDS
-            args: ["key"]
-
-    -   name: "host -> join"
-        from: "hostEnrichmentBolt"
-        to: "enrichmentJoinBolt"
-        grouping:
-            streamId: "host"
-            type: FIELDS
-            args: ["key"]
-
-    # Error output
-    -   name: "geoEnrichmentBolt -> enrichmentErrorOutputBolt"
-        from: "geoEnrichmentBolt"
-        to: "enrichmentErrorOutputBolt"
-        grouping:
-            streamId: "error"
-            type: LOCAL_OR_SHUFFLE
-
-    -   name: "stellarEnrichmentBolt -> enrichmentErrorOutputBolt"
-        from: "stellarEnrichmentBolt"
-        to: "enrichmentErrorOutputBolt"
-        grouping:
-            streamId: "error"
-            type: LOCAL_OR_SHUFFLE
-
-    -   name: "hostEnrichmentBolt -> enrichmentErrorOutputBolt"
-        from: "hostEnrichmentBolt"
-        to: "enrichmentErrorOutputBolt"
-        grouping:
-            streamId: "error"
-            type: LOCAL_OR_SHUFFLE
-
-    -   name: "simpleHBaseEnrichmentBolt -> enrichmentErrorOutputBolt"
-        from: "simpleHBaseEnrichmentBolt"
-        to: "enrichmentErrorOutputBolt"
-        grouping:
-            streamId: "error"
-            type: LOCAL_OR_SHUFFLE
-
-#threat intel
-    -   name: "enrichmentJoin -> threatSplit"
-        from: "enrichmentJoinBolt"
-        to: "threatIntelSplitBolt"
-        grouping:
-            streamId: "message"
-            type: FIELDS
-            args: ["key"]
-
-    -   name: "threatSplit -> simpleHBaseThreatIntel"
-        from: "threatIntelSplitBolt"
-        to: "simpleHBaseThreatIntelBolt"
-        grouping:
-            streamId: "hbaseThreatIntel"
-            type: FIELDS
-            args: ["message"]
-
-    -   name: "threatSplit -> stellarThreatIntel"
-        from: "threatIntelSplitBolt"
-        to: "stellarThreatIntelBolt"
-        grouping:
-            streamId: "stellar"
-            type: FIELDS
-            args: ["message"]
-
-
-    -   name: "simpleHBaseThreatIntel -> join"
-        from: "simpleHBaseThreatIntelBolt"
-        to: "threatIntelJoinBolt"
-        grouping:
-            streamId: "hbaseThreatIntel"
-            type: FIELDS
-            args: ["key"]
-
-    -   name: "stellarThreatIntel -> join"
-        from: "stellarThreatIntelBolt"
-        to: "threatIntelJoinBolt"
-        grouping:
-            streamId: "stellar"
-            type: FIELDS
-            args: ["key"]
-
-    -   name: "threatIntelSplit -> threatIntelJoin"
-        from: "threatIntelSplitBolt"
-        to: "threatIntelJoinBolt"
-        grouping:
-            streamId: "message"
-            type: FIELDS
-            args: ["key"]
-#output
-    -   name: "threatIntelJoin -> output"
-        from: "threatIntelJoinBolt"
-        to: "outputBolt"
-        grouping:
-            streamId: "message"
-            type: LOCAL_OR_SHUFFLE
-
-    # Error output
-    -   name: "simpleHBaseThreatIntelBolt -> threatIntelErrorOutputBolt"
-        from: "simpleHBaseThreatIntelBolt"
-        to: "threatIntelErrorOutputBolt"
-        grouping:
-            streamId: "error"
-            type: LOCAL_OR_SHUFFLE
-
-    -   name: "stellarThreatIntelBolt -> threatIntelErrorOutputBolt"
-        from: "stellarThreatIntelBolt"
-        to: "threatIntelErrorOutputBolt"
-        grouping:
-            streamId: "error"
-            type: LOCAL_OR_SHUFFLE
-

http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-platform/metron-enrichment/src/main/scripts/start_enrichment_topology.sh
----------------------------------------------------------------------
diff --git a/metron-platform/metron-enrichment/src/main/scripts/start_enrichment_topology.sh b/metron-platform/metron-enrichment/src/main/scripts/start_enrichment_topology.sh
index 6824b87..77c3a77 100755
--- a/metron-platform/metron-enrichment/src/main/scripts/start_enrichment_topology.sh
+++ b/metron-platform/metron-enrichment/src/main/scripts/start_enrichment_topology.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# 
+#
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -7,9 +7,9 @@
 # to you under the Apache License, Version 2.0 (the
 # "License"); you may not use this file except in compliance
 # with the License.  You may obtain a copy of the License at
-# 
+#
 #     http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -19,4 +19,12 @@
 METRON_VERSION=${project.version}
 METRON_HOME=/usr/metron/$METRON_VERSION
 TOPOLOGY_JAR=${project.artifactId}-$METRON_VERSION-uber.jar
-storm jar $METRON_HOME/lib/$TOPOLOGY_JAR org.apache.storm.flux.Flux --remote $METRON_HOME/flux/enrichment/remote.yaml --filter $METRON_HOME/config/enrichment.properties
+
+# there are two enrichment topologies.  by default, the split-join enrichment topology is executed
+SPLIT_JOIN_ARGS="--remote $METRON_HOME/flux/enrichment/remote-splitjoin.yaml --filter $METRON_HOME/config/enrichment-splitjoin.properties"
+UNIFIED_ARGS="--remote $METRON_HOME/flux/enrichment/remote-unified.yaml --filter $METRON_HOME/config/enrichment-unified.properties"
+
+# by passing in different args, the user can execute an alternative enrichment topology
+ARGS=${@:-$SPLIT_JOIN_ARGS}
+
+storm jar $METRON_HOME/lib/$TOPOLOGY_JAR org.apache.storm.flux.Flux $ARGS

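With this change the script defaults to the split-join topology, and the unified
topology is selected simply by passing the alternate flux/properties pair through,
e.g. start_enrichment_topology.sh --remote $METRON_HOME/flux/enrichment/remote-unified.yaml
--filter $METRON_HOME/config/enrichment-unified.properties.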
http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/integration/EnrichmentIntegrationTest.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/integration/EnrichmentIntegrationTest.java b/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/integration/EnrichmentIntegrationTest.java
index 3c55c95..2e22eab 100644
--- a/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/integration/EnrichmentIntegrationTest.java
+++ b/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/integration/EnrichmentIntegrationTest.java
@@ -65,16 +65,19 @@ import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+/**
+ * Integration test for the 'Split-Join' enrichment topology.
+ */
 public class EnrichmentIntegrationTest extends BaseIntegrationTest {
-  private static final String ERROR_TOPIC = "enrichment_error";
-  private static final String SRC_IP = "ip_src_addr";
-  private static final String DST_IP = "ip_dst_addr";
-  private static final String MALICIOUS_IP_TYPE = "malicious_ip";
-  private static final String PLAYFUL_CLASSIFICATION_TYPE = "playful_classification";
-  private static final Map<String, Object> PLAYFUL_ENRICHMENT = new HashMap<String, Object>() {{
+
+  public static final String ERROR_TOPIC = "enrichment_error";
+  public static final String SRC_IP = "ip_src_addr";
+  public static final String DST_IP = "ip_dst_addr";
+  public static final String MALICIOUS_IP_TYPE = "malicious_ip";
+  public static final String PLAYFUL_CLASSIFICATION_TYPE = "playful_classification";
+  public static final Map<String, Object> PLAYFUL_ENRICHMENT = new HashMap<String, Object>() {{
     put("orientation", "north");
   }};
-
   public static final String DEFAULT_COUNTRY = "test country";
   public static final String DEFAULT_CITY = "test city";
   public static final String DEFAULT_POSTAL_CODE = "test postalCode";
@@ -82,15 +85,18 @@ public class EnrichmentIntegrationTest extends BaseIntegrationTest {
   public static final String DEFAULT_LONGITUDE = "test longitude";
   public static final String DEFAULT_DMACODE= "test dmaCode";
   public static final String DEFAULT_LOCATION_POINT= Joiner.on(',').join(DEFAULT_LATITUDE,DEFAULT_LONGITUDE);
+  public static final String cf = "cf";
+  public static final String trackerHBaseTableName = "tracker";
+  public static final String threatIntelTableName = "threat_intel";
+  public static final String enrichmentsTableName = "enrichments";
 
-  protected String templatePath = "../metron-enrichment/src/main/config/enrichment.properties.j2";
   protected String sampleParsedPath = TestConstants.SAMPLE_DATA_PARSED_PATH + "TestExampleParsed";
   private final List<byte[]> inputMessages = getInputMessages(sampleParsedPath);
 
   private static File geoHdfsFile;
 
   protected String fluxPath() {
-    return "../metron-enrichment/src/main/flux/enrichment/remote.yaml";
+    return "../metron-enrichment/src/main/flux/enrichment/remote-splitjoin.yaml";
   }
 
   private static List<byte[]> getInputMessages(String path){
@@ -115,13 +121,22 @@ public class EnrichmentIntegrationTest extends BaseIntegrationTest {
     geoHdfsFile = new File(new File(baseDir), "GeoIP2-City-Test.mmdb.gz");
   }
 
-  @Test
-  public void test() throws Exception {
-    final String cf = "cf";
-    final String trackerHBaseTableName = "tracker";
-    final String threatIntelTableName = "threat_intel";
-    final String enrichmentsTableName = "enrichments";
-    final Properties topologyProperties = new Properties() {{
+  /**
+   * Returns the path to the topology properties template.
+   *
+   * @return The path to the topology properties template.
+   */
+  public String getTemplatePath() {
+    return "../metron-enrichment/src/main/config/enrichment-splitjoin.properties.j2";
+  }
+
+  /**
+   * Properties for the 'Split-Join' topology.
+   *
+   * @return The topology properties.
+   */
+  public Properties getTopologyProperties() {
+    return new Properties() {{
       setProperty("enrichment_workers", "1");
       setProperty("enrichment_acker_executors", "0");
       setProperty("enrichment_topology_worker_childopts", "");
@@ -142,11 +157,8 @@ public class EnrichmentIntegrationTest extends BaseIntegrationTest {
               "{\"ip\":\"10.1.128.237\", \"local\":\"UNKNOWN\", \"type\":\"unknown\", \"asset_value\" : \"important\"}," +
               "{\"ip\":\"10.60.10.254\", \"local\":\"YES\", \"type\":\"printer\", \"asset_value\" : \"important\"}," +
               "{\"ip\":\"10.0.2.15\", \"local\":\"YES\", \"type\":\"printer\", \"asset_value\" : \"important\"}]");
-
       setProperty("threatintel_hbase_table", threatIntelTableName);
       setProperty("threatintel_hbase_cf", cf);
-
-
       setProperty("enrichment_kafka_spout_parallelism", "1");
       setProperty("enrichment_split_parallelism", "1");
       setProperty("enrichment_stellar_parallelism", "1");
@@ -155,8 +167,13 @@ public class EnrichmentIntegrationTest extends BaseIntegrationTest {
       setProperty("threat_intel_stellar_parallelism", "1");
       setProperty("threat_intel_join_parallelism", "1");
       setProperty("kafka_writer_parallelism", "1");
-
     }};
+  }
+
+  @Test
+  public void test() throws Exception {
+
+    final Properties topologyProperties = getTopologyProperties();
     final ZKServerComponent zkServerComponent = getZKServerComponent(topologyProperties);
     final KafkaComponent kafkaComponent = getKafkaComponent(topologyProperties, new ArrayList<KafkaComponent.Topic>() {{
       add(new KafkaComponent.Topic(Constants.ENRICHMENT_TOPIC, 1));
@@ -196,7 +213,7 @@ public class EnrichmentIntegrationTest extends BaseIntegrationTest {
     FluxTopologyComponent fluxComponent = new FluxTopologyComponent.Builder()
             .withTopologyLocation(new File(fluxPath()))
             .withTopologyName("test")
-            .withTemplateLocation(new File(templatePath))
+            .withTemplateLocation(new File(getTemplatePath()))
             .withTopologyProperties(topologyProperties)
             .build();
 
@@ -531,7 +548,7 @@ public class EnrichmentIntegrationTest extends BaseIntegrationTest {
                     , message -> {
                       try {
                         return new HashMap<>(JSONUtils.INSTANCE.load(new String(message)
-                                , JSONUtils.MAP_SUPPLIER 
+                                , JSONUtils.MAP_SUPPLIER
                         )
                         );
                       } catch (Exception ex) {

http://git-wip-us.apache.org/repos/asf/metron/blob/82212ba8/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/integration/UnifiedEnrichmentIntegrationTest.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/integration/UnifiedEnrichmentIntegrationTest.java b/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/integration/UnifiedEnrichmentIntegrationTest.java
index 1f06733..5c19b39 100644
--- a/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/integration/UnifiedEnrichmentIntegrationTest.java
+++ b/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/integration/UnifiedEnrichmentIntegrationTest.java
@@ -17,7 +17,78 @@
  */
 package org.apache.metron.enrichment.integration;
 
+import org.apache.metron.common.Constants;
+import org.apache.metron.hbase.mock.MockHBaseTableProvider;
+
+import java.util.Properties;
+
+/**
+ * Integration test for the 'Unified' enrichment topology.
+ */
 public class UnifiedEnrichmentIntegrationTest extends EnrichmentIntegrationTest {
+
+  /**
+   * Returns the path to the topology properties template.
+   *
+   * @return The path to the topology properties template.
+   */
+  public String getTemplatePath() {
+    return "../metron-enrichment/src/main/config/enrichment-unified.properties.j2";
+  }
+
+  /**
+   * Properties for the 'Unified' topology.
+   *
+   * @return The topology properties.
+   */
+  @Override
+  public Properties getTopologyProperties() {
+    return new Properties() {{
+
+      // storm
+      setProperty("enrichment_workers", "1");
+      setProperty("enrichment_acker_executors", "0");
+      setProperty("enrichment_topology_worker_childopts", "");
+      setProperty("topology_auto_credentials", "[]");
+      setProperty("enrichment_topology_max_spout_pending", "500");
+
+      // kafka - zookeeper_quorum, kafka_brokers set elsewhere
+      setProperty("kafka_security_protocol", "PLAINTEXT");
+      setProperty("enrichment_kafka_start", "UNCOMMITTED_EARLIEST");
+      setProperty("enrichment_input_topic", Constants.ENRICHMENT_TOPIC);
+      setProperty("enrichment_output_topic", Constants.INDEXING_TOPIC);
+      setProperty("enrichment_error_topic", ERROR_TOPIC);
+      setProperty("threatintel_error_topic", ERROR_TOPIC);
+
+      // enrichment
+      setProperty("enrichment_hbase_provider_impl", "" + MockHBaseTableProvider.class.getName());
+      setProperty("enrichment_hbase_table", enrichmentsTableName);
+      setProperty("enrichment_hbase_cf", cf);
+      setProperty("enrichment_host_known_hosts", "[{\"ip\":\"10.1.128.236\", \"local\":\"YES\", \"type\":\"webserver\", \"asset_value\" : \"important\"}," +
+              "{\"ip\":\"10.1.128.237\", \"local\":\"UNKNOWN\", \"type\":\"unknown\", \"asset_value\" : \"important\"}," +
+              "{\"ip\":\"10.60.10.254\", \"local\":\"YES\", \"type\":\"printer\", \"asset_value\" : \"important\"}," +
+              "{\"ip\":\"10.0.2.15\", \"local\":\"YES\", \"type\":\"printer\", \"asset_value\" : \"important\"}]");
+
+      // threat intel
+      setProperty("threatintel_hbase_table", threatIntelTableName);
+      setProperty("threatintel_hbase_cf", cf);
+
+      // parallelism
+      setProperty("unified_kafka_spout_parallelism", "1");
+      setProperty("unified_enrichment_parallelism", "1");
+      setProperty("unified_threat_intel_parallelism", "1");
+      setProperty("unified_kafka_writer_parallelism", "1");
+
+      // caches
+      setProperty("unified_enrichment_cache_size", "1000");
+      setProperty("unified_threat_intel_cache_size", "1000");
+
+      // threads
+      setProperty("unified_enrichment_threadpool_size", "1");
+      setProperty("unified_enrichment_threadpool_type", "FIXED");
+    }};
+  }
+
   @Override
   public String fluxPath() {
     return "../metron-enrichment/src/main/flux/enrichment/remote-unified.yaml";


[10/50] [abbrv] metron git commit: METRON-590 Enable Use of Event Time in Profiler (nickwallen) closes apache/metron#965

Posted by rm...@apache.org.
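
This commit spans the Ambari MPack parameters, the profiler.properties template, the Ambari theme layout, and the ProfileConfig/ProfilerConfig classes: it threads new window-duration, window-lag, and Storm timeout settings through the MPack and adds an optional timestampField so profiles can be built from event time rather than processing time, as the diffs below show.
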
http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/params_linux.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/params_linux.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/params_linux.py
index ceb9e4e..ccce022 100755
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/params_linux.py
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/params/params_linux.py
@@ -287,6 +287,8 @@ profiler_input_topic = config['configurations']['metron-enrichment-env']['enrich
 profiler_kafka_start = config['configurations']['metron-profiler-env']['profiler_kafka_start']
 profiler_period_duration = config['configurations']['metron-profiler-env']['profiler_period_duration']
 profiler_period_units = config['configurations']['metron-profiler-env']['profiler_period_units']
+profiler_window_duration = config['configurations']['metron-profiler-env']['profiler_window_duration']
+profiler_window_units = config['configurations']['metron-profiler-env']['profiler_window_units']
 profiler_ttl = config['configurations']['metron-profiler-env']['profiler_ttl']
 profiler_ttl_units = config['configurations']['metron-profiler-env']['profiler_ttl_units']
 profiler_hbase_batch = config['configurations']['metron-profiler-env']['profiler_hbase_batch']
@@ -302,6 +304,11 @@ profiler_hbase_acl_configured_flag_file = status_params.profiler_hbase_acl_confi
 if not len(profiler_topology_worker_childopts) == 0:
     profiler_topology_worker_childopts += ' '
 profiler_topology_worker_childopts += config['configurations']['metron-profiler-env']['profiler_topology_worker_childopts']
+profiler_max_routes_per_bolt = config['configurations']['metron-profiler-env']['profiler_max_routes_per_bolt']
+profiler_window_lag = config['configurations']['metron-profiler-env']['profiler_window_lag']
+profiler_window_lag_units = config['configurations']['metron-profiler-env']['profiler_window_lag_units']
+profiler_topology_message_timeout_secs = config['configurations']['metron-profiler-env']['profiler_topology_message_timeout_secs']
+profiler_topology_max_spout_pending = config['configurations']['metron-profiler-env']['profiler_topology_max_spout_pending']
 
 # Indexing
 ra_indexing_kafka_start = config['configurations']['metron-indexing-env']['ra_indexing_kafka_start']

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/templates/profiler.properties.j2
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/templates/profiler.properties.j2 b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/templates/profiler.properties.j2
index 06fd209..fabdaa7 100644
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/templates/profiler.properties.j2
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/templates/profiler.properties.j2
@@ -22,6 +22,10 @@
 
 topology.worker.childopts={{profiler_topology_worker_childopts}}
 topology.auto-credentials={{topology_auto_credentials}}
+profiler.workers={{profiler_topology_workers}}
+profiler.executors={{profiler_acker_executors}}
+topology.message.timeout.secs={{profiler_topology_message_timeout_secs}}
+topology.max.spout.pending={{profiler_topology_max_spout_pending}}
 
 ##### Profiler #####
 
@@ -29,10 +33,16 @@ profiler.input.topic={{enrichment_output_topic}}
 profiler.output.topic={{enrichment_input_topic}}
 profiler.period.duration={{profiler_period_duration}}
 profiler.period.duration.units={{profiler_period_units}}
-profiler.workers={{profiler_topology_workers}}
-profiler.executors={{profiler_acker_executors}}
+profiler.window.duration={{profiler_window_duration}}
+profiler.window.duration.units={{profiler_window_units}}
 profiler.ttl={{profiler_ttl}}
 profiler.ttl.units={{profiler_ttl_units}}
+profiler.window.lag={{profiler_window_lag}}
+profiler.window.lag.units={{profiler_window_lag_units}}
+profiler.max.routes.per.bolt={{profiler_max_routes_per_bolt}}
+
+##### HBase #####
+
 profiler.hbase.salt.divisor=1000
 profiler.hbase.table={{profiler_hbase_table}}
 profiler.hbase.column.family={{profiler_hbase_cf}}
@@ -43,6 +53,5 @@ profiler.hbase.flush.interval.seconds={{profiler_hbase_flush_interval}}
 
 kafka.zk={{zookeeper_quorum}}
 kafka.broker={{kafka_brokers}}
-# One of EARLIEST, LATEST, UNCOMMITTED_EARLIEST, UNCOMMITTED_LATEST
 kafka.start={{profiler_kafka_start}}
 kafka.security.protocol={{kafka_security_protocol}}

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/themes/metron_theme.json
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/themes/metron_theme.json b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/themes/metron_theme.json
index cef9a3b..234b551 100644
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/themes/metron_theme.json
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/themes/metron_theme.json
@@ -221,8 +221,27 @@
               "tab-rows": "3",
               "sections": [
                 {
+                  "name": "section-profiler-setup",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                    "name": "subsection-profiler-setup",
+                    "display-name": "Profiler Setup",
+                    "row-index": "0",
+                    "column-index": "0",
+                    "row-span": "1",
+                    "column-span": "1"
+                  }
+                  ]
+                },
+                {
                 "name": "section-profiler-kafka",
-                "row-index": "0",
+                "row-index": "1",
                 "column-index": "0",
                 "row-span": "1",
                 "column-span": "1",
@@ -240,8 +259,8 @@
                 ]
               },
               {
-                "name": "section-profiler-setup",
-                "row-index": "1",
+                "name": "section-profiler-storm",
+                "row-index": "2",
                 "column-index": "0",
                 "row-span": "1",
                 "column-span": "1",
@@ -249,8 +268,8 @@
                 "section-rows": "1",
                 "subsections": [
                   {
-                  "name": "subsection-profiler-setup",
-                  "display-name": "Profiler Setup",
+                  "name": "subsection-profiler-storm",
+                  "display-name": "Storm",
                   "row-index": "0",
                   "column-index": "0",
                   "row-span": "1",
@@ -259,8 +278,8 @@
                 ]
               },
               {
-                "name": "section-profiler-storm",
-                "row-index": "2",
+                "name": "section-profiler-hbase",
+                "row-index": "3",
                 "column-index": "0",
                 "row-span": "1",
                 "column-span": "1",
@@ -268,8 +287,8 @@
                 "section-rows": "1",
                 "subsections": [
                   {
-                  "name": "subsection-profiler-storm",
-                  "display-name": "Storm",
+                  "name": "subsection-profiler-hbase",
+                  "display-name": "HBase",
                   "row-index": "0",
                   "column-index": "0",
                   "row-span": "1",
@@ -568,7 +587,6 @@
           "config": "metron-indexing-env/bolt_hdfs_rotation_policy_count",
           "subsection-name": "subsection-indexing-hdfs"
         },
-
         {
           "config": "metron-profiler-env/profiler_kafka_start",
           "subsection-name": "subsection-profiler-kafka"
@@ -582,6 +600,14 @@
           "subsection-name": "subsection-profiler-setup"
         },
         {
+          "config": "metron-profiler-env/profiler_window_duration",
+          "subsection-name": "subsection-profiler-setup"
+        },
+        {
+          "config": "metron-profiler-env/profiler_window_units",
+          "subsection-name": "subsection-profiler-setup"
+        },
+        {
           "config": "metron-profiler-env/profiler_ttl",
           "subsection-name": "subsection-profiler-setup"
         },
@@ -590,20 +616,32 @@
           "subsection-name": "subsection-profiler-setup"
         },
         {
-          "config": "metron-profiler-env/profiler_hbase_table",
+          "config": "metron-profiler-env/profiler_window_lag",
           "subsection-name": "subsection-profiler-setup"
         },
         {
-          "config": "metron-profiler-env/profiler_hbase_cf",
+          "config": "metron-profiler-env/profiler_window_lag_units",
           "subsection-name": "subsection-profiler-setup"
         },
         {
-          "config": "metron-profiler-env/profiler_hbase_batch",
+          "config": "metron-profiler-env/profiler_max_routes_per_bolt",
           "subsection-name": "subsection-profiler-setup"
         },
         {
+          "config": "metron-profiler-env/profiler_hbase_table",
+          "subsection-name": "subsection-profiler-hbase"
+        },
+        {
+          "config": "metron-profiler-env/profiler_hbase_cf",
+          "subsection-name": "subsection-profiler-hbase"
+        },
+        {
+          "config": "metron-profiler-env/profiler_hbase_batch",
+          "subsection-name": "subsection-profiler-hbase"
+        },
+        {
           "config": "metron-profiler-env/profiler_hbase_flush_interval",
-          "subsection-name": "subsection-profiler-setup"
+          "subsection-name": "subsection-profiler-hbase"
         },
         {
           "config": "metron-profiler-env/profiler_topology_worker_childopts",
@@ -618,6 +656,14 @@
           "subsection-name": "subsection-profiler-storm"
         },
         {
+          "config": "metron-profiler-env/profiler_topology_message_timeout_secs",
+          "subsection-name": "subsection-profiler-storm"
+        },
+        {
+          "config": "metron-profiler-env/profiler_topology_max_spout_pending",
+          "subsection-name": "subsection-profiler-storm"
+        },
+        {
           "config": "metron-rest-env/metron_rest_port",
           "subsection-name": "subsection-rest"
         },
@@ -905,7 +951,6 @@
           "type": "text-field"
         }
       },
-
       {
         "config": "metron-indexing-env/batch_indexing_acker_executors",
         "widget": {
@@ -1004,6 +1049,18 @@
         }
       },
       {
+        "config": "metron-profiler-env/profiler_window_duration",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "metron-profiler-env/profiler_window_units",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
         "config": "metron-profiler-env/profiler_ttl",
         "widget": {
           "type": "text-field"
@@ -1016,6 +1073,24 @@
         }
       },
       {
+        "config": "metron-profiler-env/profiler_max_routes_per_bolt",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "metron-profiler-env/profiler_window_lag",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "metron-profiler-env/profiler_window_lag_units",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
         "config": "metron-profiler-env/profiler_hbase_table",
         "widget": {
           "type": "text-field"
@@ -1057,7 +1132,18 @@
           "type": "text-field"
         }
       },
-
+      {
+        "config": "metron-profiler-env/profiler_topology_max_spout_pending",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "metron-profiler-env/profiler_topology_message_timeout_secs",
+        "widget": {
+          "type": "text-field"
+        }
+      },
       {
         "config": "metron-rest-env/metron_rest_port",
         "widget": {

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileConfig.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileConfig.java b/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileConfig.java
index 06c82d2..6205fbf 100644
--- a/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileConfig.java
+++ b/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfileConfig.java
@@ -89,6 +89,9 @@ public class ProfileConfig implements Serializable {
    */
   private Long expires;
 
+  public ProfileConfig() {
+  }
+
   /**
    * A profile definition requires at the very least the profile name, the foreach, and result
    * expressions.
@@ -114,6 +117,11 @@ public class ProfileConfig implements Serializable {
     this.profile = profile;
   }
 
+  public ProfileConfig withProfile(String profile) {
+    this.profile = profile;
+    return this;
+  }
+
   public String getForeach() {
     return foreach;
   }
@@ -122,6 +130,11 @@ public class ProfileConfig implements Serializable {
     this.foreach = foreach;
   }
 
+  public ProfileConfig withForeach(String foreach) {
+    this.foreach = foreach;
+    return this;
+  }
+
   public String getOnlyif() {
     return onlyif;
   }
@@ -130,6 +143,11 @@ public class ProfileConfig implements Serializable {
     this.onlyif = onlyif;
   }
 
+  public ProfileConfig withOnlyif(String onlyif) {
+    this.onlyif = onlyif;
+    return this;
+  }
+
   public Map<String, String> getInit() {
     return init;
   }
@@ -138,6 +156,16 @@ public class ProfileConfig implements Serializable {
     this.init = init;
   }
 
+  public ProfileConfig withInit(Map<String, String> init) {
+    this.init.putAll(init);
+    return this;
+  }
+
+  public ProfileConfig withInit(String var, String expression) {
+    this.init.put(var, expression);
+    return this;
+  }
+
   public Map<String, String> getUpdate() {
     return update;
   }
@@ -146,6 +174,16 @@ public class ProfileConfig implements Serializable {
     this.update = update;
   }
 
+  public ProfileConfig withUpdate(Map<String, String> update) {
+    this.update.putAll(update);
+    return this;
+  }
+
+  public ProfileConfig withUpdate(String var, String expression) {
+    this.update.put(var, expression);
+    return this;
+  }
+
   public List<String> getGroupBy() {
     return groupBy;
   }
@@ -154,6 +192,11 @@ public class ProfileConfig implements Serializable {
     this.groupBy = groupBy;
   }
 
+  public ProfileConfig withGroupBy(List<String> groupBy) {
+    this.groupBy = groupBy;
+    return this;
+  }
+
   public ProfileResult getResult() {
     return result;
   }
@@ -162,6 +205,11 @@ public class ProfileConfig implements Serializable {
     this.result = result;
   }
 
+  public ProfileConfig withResult(String profileExpression) {
+    this.result = new ProfileResult(profileExpression);
+    return this;
+  }
+
   public Long getExpires() {
     return expires;
   }
@@ -170,6 +218,11 @@ public class ProfileConfig implements Serializable {
     this.expires = expiresDays;
   }
 
+  public ProfileConfig withExpires(Long expiresDays) {
+    this.expires = TimeUnit.DAYS.toMillis(expiresDays);
+    return this;
+  }
+
   @Override
   public boolean equals(Object o) {
     if (this == o) return true;
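
The with* methods above give ProfileConfig a fluent builder style. A minimal sketch using only those methods (the profile definition itself is hypothetical):

    ProfileConfig profile = new ProfileConfig()
            .withProfile("example-count")
            .withForeach("ip_src_addr")
            .withInit("count", "0")
            .withUpdate("count", "count + 1")
            .withResult("count")
            .withExpires(30L);  // days; note withExpires converts to millis while setExpires assigns as-is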

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfilerConfig.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfilerConfig.java b/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfilerConfig.java
index e7c081a..0bdb7e2 100644
--- a/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfilerConfig.java
+++ b/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/profiler/ProfilerConfig.java
@@ -20,9 +20,10 @@ package org.apache.metron.common.configuration.profiler;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Optional;
 
 /**
- * The definition for entire Profiler, which may contain many Profile definitions.
+ * The configuration object for the Profiler, which may contain many Profile definitions.
  */
 public class ProfilerConfig implements Serializable {
 
@@ -31,6 +32,20 @@ public class ProfilerConfig implements Serializable {
    */
   private List<ProfileConfig> profiles = new ArrayList<>();
 
+  /**
+   * The name of a field containing the timestamp that is used to
+   * generate profiles.
+   *
+   * <p>By default, the processing time of the Profiler is used rather
+   * than event time; a value contained within the message itself.
+   *
+   * <p>The field must contain a timestamp in epoch milliseconds.
+   *
+   * <p>If a message does NOT contain this field, it will be dropped
+   * and not included in any profiles.
+   */
+  private Optional<String> timestampField = Optional.empty();
+
   public List<ProfileConfig> getProfiles() {
     return profiles;
   }
@@ -39,10 +54,33 @@ public class ProfilerConfig implements Serializable {
     this.profiles = profiles;
   }
 
+  public ProfilerConfig withProfile(ProfileConfig profileConfig) {
+    this.profiles.add(profileConfig);
+    return this;
+  }
+
+  public Optional<String> getTimestampField() {
+    return timestampField;
+  }
+
+  public void setTimestampField(String timestampField) {
+    this.timestampField = Optional.of(timestampField);
+  }
+
+  public void setTimestampField(Optional<String> timestampField) {
+    this.timestampField = timestampField;
+  }
+
+  public ProfilerConfig withTimestampField(Optional<String> timestampField) {
+    this.timestampField = timestampField;
+    return this;
+  }
+
   @Override
   public String toString() {
     return "ProfilerConfig{" +
             "profiles=" + profiles +
+            ", timestampField='" + timestampField + '\'' +
             '}';
   }
 
@@ -50,13 +88,15 @@ public class ProfilerConfig implements Serializable {
   public boolean equals(Object o) {
     if (this == o) return true;
     if (o == null || getClass() != o.getClass()) return false;
-
     ProfilerConfig that = (ProfilerConfig) o;
-    return profiles != null ? profiles.equals(that.profiles) : that.profiles == null;
+    if (profiles != null ? !profiles.equals(that.profiles) : that.profiles != null) return false;
+    return timestampField != null ? timestampField.equals(that.timestampField) : that.timestampField == null;
   }
 
   @Override
   public int hashCode() {
-    return profiles != null ? profiles.hashCode() : 0;
+    int result = profiles != null ? profiles.hashCode() : 0;
+    result = 31 * result + (timestampField != null ? timestampField.hashCode() : 0);
+    return result;
   }
 }
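
With the fluent API above, opting the Profiler into event time is one extra call. A sketch reusing the hypothetical profile from the previous example (requires java.util.Optional):

    ProfilerConfig config = new ProfilerConfig()
            .withProfile(profile)
            .withTimestampField(Optional.of("timestamp"));
    // per the javadoc above, a message lacking the "timestamp" field is
    // dropped and contributes to no profile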

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-platform/metron-common/src/main/java/org/apache/metron/common/utils/JSONUtils.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/utils/JSONUtils.java b/metron-platform/metron-common/src/main/java/org/apache/metron/common/utils/JSONUtils.java
index c02f19d..02e6015 100644
--- a/metron-platform/metron-common/src/main/java/org/apache/metron/common/utils/JSONUtils.java
+++ b/metron-platform/metron-common/src/main/java/org/apache/metron/common/utils/JSONUtils.java
@@ -24,6 +24,10 @@ import com.fasterxml.jackson.core.type.TypeReference;
 import com.fasterxml.jackson.databind.JsonNode;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.flipkart.zjsonpatch.JsonPatch;
+import org.json.simple.JSONObject;
+import org.json.simple.parser.JSONParser;
+import org.json.simple.parser.ParseException;
+
 import java.io.BufferedInputStream;
 import java.io.File;
 import java.io.FileInputStream;
@@ -31,17 +35,10 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.lang.reflect.ParameterizedType;
 import java.lang.reflect.Type;
-import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.function.Supplier;
 
-import com.google.common.reflect.TypeToken;
-import org.json.simple.JSONObject;
-import org.json.simple.parser.JSONParser;
-import org.json.simple.parser.ParseException;
-
 public enum JSONUtils {
   INSTANCE;
 

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-platform/metron-common/src/main/java/org/apache/metron/common/zookeeper/configurations/ProfilerUpdater.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/zookeeper/configurations/ProfilerUpdater.java b/metron-platform/metron-common/src/main/java/org/apache/metron/common/zookeeper/configurations/ProfilerUpdater.java
index 68c5203..4976d30 100644
--- a/metron-platform/metron-common/src/main/java/org/apache/metron/common/zookeeper/configurations/ProfilerUpdater.java
+++ b/metron-platform/metron-common/src/main/java/org/apache/metron/common/zookeeper/configurations/ProfilerUpdater.java
@@ -33,6 +33,7 @@ import java.util.Map;
 import java.util.function.Supplier;
 
 public class ProfilerUpdater extends ConfigurationsUpdater<ProfilerConfigurations> {
+
   public ProfilerUpdater(Reloadable reloadable, Supplier<ProfilerConfigurations> configSupplier) {
     super(reloadable, configSupplier);
   }

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-platform/metron-common/src/test/java/org/apache/metron/common/configuration/profiler/ProfileConfigTest.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/test/java/org/apache/metron/common/configuration/profiler/ProfileConfigTest.java b/metron-platform/metron-common/src/test/java/org/apache/metron/common/configuration/profiler/ProfileConfigTest.java
index a0e115d..e178ee0 100644
--- a/metron-platform/metron-common/src/test/java/org/apache/metron/common/configuration/profiler/ProfileConfigTest.java
+++ b/metron-platform/metron-common/src/test/java/org/apache/metron/common/configuration/profiler/ProfileConfigTest.java
@@ -27,10 +27,11 @@ import org.junit.Test;
 import java.io.IOException;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
 
 /**
- * Ensures that Profile definitions have the expected defaults
+ * Tests the {@link ProfileConfig} class.
+ *
+ * Ensures that profile definitions have the expected defaults
  * and can be (de)serialized to and from JSON.
  */
 public class ProfileConfigTest {

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-platform/metron-common/src/test/java/org/apache/metron/common/configuration/profiler/ProfilerConfigTest.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/test/java/org/apache/metron/common/configuration/profiler/ProfilerConfigTest.java b/metron-platform/metron-common/src/test/java/org/apache/metron/common/configuration/profiler/ProfilerConfigTest.java
new file mode 100644
index 0000000..2e73cde
--- /dev/null
+++ b/metron-platform/metron-common/src/test/java/org/apache/metron/common/configuration/profiler/ProfilerConfigTest.java
@@ -0,0 +1,120 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+package org.apache.metron.common.configuration.profiler;
+
+import org.adrianwalker.multilinestring.Multiline;
+import org.apache.metron.common.utils.JSONUtils;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Tests the {@link ProfilerConfig} class.
+ */
+public class ProfilerConfigTest {
+
+  /**
+   * {
+   *   "profiles": [
+   *      {
+   *        "profile": "profile1",
+   *        "foreach": "ip_src_addr",
+   *        "init":   { "count": "0" },
+   *        "update": { "count": "count + 1" },
+   *        "result":   "count"
+   *      }
+   *   ]
+   * }
+   */
+  @Multiline
+  private String noTimestampField;
+
+  /**
+   * If no 'timestampField' is defined, it should not be present by default.
+   */
+  @Test
+  public void testNoTimestampField() throws IOException {
+    ProfilerConfig conf = JSONUtils.INSTANCE.load(noTimestampField, ProfilerConfig.class);
+    assertFalse(conf.getTimestampField().isPresent());
+  }
+
+  /**
+   * {
+   *   "profiles": [
+   *      {
+   *        "profile": "profile1",
+   *        "foreach": "ip_src_addr",
+   *        "init":   { "count": "0" },
+   *        "update": { "count": "count + 1" },
+   *        "result":   "count"
+   *      }
+   *   ],
+   *   "timestampField": "timestamp"
+   * }
+   */
+  @Multiline
+  private String timestampField;
+
+  /**
+   * If a 'timestampField' is defined, it should be present after deserialization.
+   */
+  @Test
+  public void testTimestampField() throws IOException {
+    ProfilerConfig conf = JSONUtils.INSTANCE.load(timestampField, ProfilerConfig.class);
+    assertTrue(conf.getTimestampField().isPresent());
+  }
+
+  /**
+   * {
+   *   "profiles": [
+   *      {
+   *        "profile": "profile1",
+   *        "foreach": "ip_src_addr",
+   *        "init":   { "count": "0" },
+   *        "update": { "count": "count + 1" },
+   *        "result":   "count"
+   *      },
+   *      {
+   *        "profile": "profile2",
+   *        "foreach": "ip_dst_addr",
+   *        "init":   { "count": "0" },
+   *        "update": { "count": "count + 1" },
+   *        "result":   "count"
+   *      }
+   *   ]
+   * }
+   */
+  @Multiline
+  private String twoProfiles;
+
+  /**
+   * Multiple profiles can be defined and each should be deserialized.
+   */
+  @Test
+  public void testTwoProfiles() throws IOException {
+    ProfilerConfig conf = JSONUtils.INSTANCE.load(twoProfiles, ProfilerConfig.class);
+    assertEquals(2, conf.getProfiles().size());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-platform/metron-integration-test/src/main/java/org/apache/metron/integration/components/KafkaComponent.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-integration-test/src/main/java/org/apache/metron/integration/components/KafkaComponent.java b/metron-platform/metron-integration-test/src/main/java/org/apache/metron/integration/components/KafkaComponent.java
index 9d8c57e..08910be 100644
--- a/metron-platform/metron-integration-test/src/main/java/org/apache/metron/integration/components/KafkaComponent.java
+++ b/metron-platform/metron-integration-test/src/main/java/org/apache/metron/integration/components/KafkaComponent.java
@@ -30,6 +30,9 @@ import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 import java.util.logging.Level;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
 import kafka.api.FetchRequest;
 import kafka.api.FetchRequestBuilder;
 import kafka.common.TopicExistsException;
@@ -48,6 +51,7 @@ import kafka.utils.Time;
 import kafka.utils.ZKStringSerializer$;
 import kafka.utils.ZkUtils;
 import org.I0Itec.zkclient.ZkClient;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerRecord;
 import org.apache.metron.integration.InMemoryComponent;
@@ -314,11 +318,44 @@ public class KafkaComponent implements InMemoryComponent {
     }
   }
 
+  /**
+   * Write a collection of messages to a Kafka topic.
+   *
+   * @param topic The name of the Kafka topic.
+   * @param messages The collection of messages to write.
+   */
   public void writeMessages(String topic, Collection<byte[]> messages) {
     try(KafkaProducer<String, byte[]> kafkaProducer = createProducer()) {
       for (byte[] message : messages) {
-        kafkaProducer.send(new ProducerRecord<String, byte[]>(topic, message));
+        kafkaProducer.send(new ProducerRecord<>(topic, message));
       }
     }
   }
+
+  /**
+   * Write messages to a Kafka topic.
+   *
+   * @param topic The name of the Kafka topic.
+   * @param messages The messages to write.
+   */
+  public void writeMessages(String topic, String... messages) {
+
+    // convert each message to raw bytes
+    List<byte[]> messagesAsBytes = Stream.of(messages)
+            .map(Bytes::toBytes)
+            .collect(Collectors.toList());
+
+    writeMessages(topic, messagesAsBytes);
+  }
+
+  /**
+   * Write messages to a Kafka topic.
+   *
+   * @param topic The name of the Kafka topic.
+   * @param messages The messages to write.
+   */
+  public void writeMessages(String topic, List<String> messages) {
+
+    writeMessages(topic, messages.toArray(new String[] {}));
+  }
 }
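
The new overloads let a test hand messages to Kafka as plain strings. A hypothetical usage (topic name and payloads are made up; both overloads appear in the diff above):

    kafkaComponent.writeMessages("enrichments",
            "{\"ip_src_addr\":\"10.0.0.1\"}",
            "{\"ip_src_addr\":\"10.0.0.2\"}");
    kafkaComponent.writeMessages("enrichments",
            java.util.Arrays.asList("{\"ip_src_addr\":\"10.0.0.3\"}"));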


[41/50] [abbrv] metron git commit: METRON-1527: Remove dead test file sitting in source folder (mmiklavc via mmiklavc) closes apache/metron#994

Posted by rm...@apache.org.
METRON-1527: Remove dead test file sitting in source folder (mmiklavc via mmiklavc) closes apache/metron#994


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/08252f59
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/08252f59
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/08252f59

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 08252f59b2df1f4ef149b899c944f6a8a0aa049e
Parents: 82212ba
Author: mmiklavc <mi...@gmail.com>
Authored: Tue Apr 17 10:55:08 2018 -0600
Committer: Michael Miklavcic <mi...@gmail.com>
Committed: Tue Apr 17 10:55:08 2018 -0600

----------------------------------------------------------------------
 .../org/apache/metron/common/writer/test.json   | 31 --------------------
 1 file changed, 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/08252f59/metron-platform/metron-common/src/main/java/org/apache/metron/common/writer/test.json
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/writer/test.json b/metron-platform/metron-common/src/main/java/org/apache/metron/common/writer/test.json
deleted file mode 100644
index 023cd63..0000000
--- a/metron-platform/metron-common/src/main/java/org/apache/metron/common/writer/test.json
+++ /dev/null
@@ -1,31 +0,0 @@
-{
-  "index": "bro",
-  "batchSize": 1,
-  "enrichment" : {
-    "fieldMap":
-    {
-      "geo": ["ip_dst_addr", "ip_src_addr"],
-      "host": ["host"]
-    }
-  },
-  "threatIntel" : {
-    "fieldMap":
-    {
-      "hbaseThreatIntel": ["ip_src_addr", "ip_dst_addr"]
-    },
-    "fieldToTypeMap":
-    {
-      "ip_src_addr" : ["malicious_ip"],
-      "ip_dst_addr" : ["malicious_ip"]
-    },
-    "triageConfig" : {
-      "riskLevelRules" : [
-        {
-          "rule" : "ip_src_addr == '31.24.30.31'",
-          "score" : 10
-        }
-      ],
-      "aggregator" : "MAX"
-    }
-  }
-}


[37/50] [abbrv] metron git commit: METRON-1522 Fix the typo errors at profile debugger readme (MohanDV via nickwallen) closes apache/metron#992

Posted by rm...@apache.org.
METRON-1522 Fix the typo errors at profile debugger readme  (MohanDV via nickwallen) closes apache/metron#992


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/1d3e7fcd
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/1d3e7fcd
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/1d3e7fcd

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 1d3e7fcd7db64e7cb6d986802240fcbae174ae91
Parents: f8b7c58
Author: MohanDV <mo...@gmail.com>
Authored: Fri Apr 13 16:50:14 2018 -0400
Committer: nickallen <ni...@apache.org>
Committed: Fri Apr 13 16:50:14 2018 -0400

----------------------------------------------------------------------
 metron-analytics/metron-profiler/README.md | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/1d3e7fcd/metron-analytics/metron-profiler/README.md
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/README.md b/metron-analytics/metron-profiler/README.md
index 218ec66..01918aa 100644
--- a/metron-analytics/metron-profiler/README.md
+++ b/metron-analytics/metron-profiler/README.md
@@ -152,8 +152,8 @@ Creating and refining profiles is an iterative process.  Iterating against a liv
 	* If a message is needed by two profiles, then two routes have been followed.
 
 	```
-	[Stellar]>>> p := PROFILER_INIT(conf)
-	[Stellar]>>> p
+	[Stellar]>>> profiler := PROFILER_INIT(conf)
+	[Stellar]>>> profiler
 	Profiler{1 profile(s), 0 messages(s), 0 route(s)}
 	```
 
@@ -172,11 +172,11 @@ Creating and refining profiles is an iterative process.  Iterating against a liv
 1. Apply the message to your Profiler, as many times as you like.
 
 	```
-	[Stellar]>>> PROFILER_APPLY(msg, p)
+	[Stellar]>>> PROFILER_APPLY(msg, profiler)
 	Profiler{1 profile(s), 1 messages(s), 1 route(s)}
 	```
 	```
-	[Stellar]>>> PROFILER_APPLY(msg, p)
+	[Stellar]>>> PROFILER_APPLY(msg, profiler)
 	Profiler{1 profile(s), 2 messages(s), 2 route(s)}
 	```
 
@@ -205,7 +205,7 @@ Creating and refining profiles is an iterative process.  Iterating against a liv
 	```
 	Apply those 10 messages to your profile(s).
 	```
-	[Stellar]>>> PROFILER_APPLY(msgs, p)
+	[Stellar]>>> PROFILER_APPLY(msgs, profiler)
 	  Profiler{1 profile(s), 10 messages(s), 10 route(s)}
 	```
 


[32/50] [abbrv] metron git commit: METRON-1516 Support for Ansible 2.5.0 (ottobackwards) closes apache/metron#989

Posted by rm...@apache.org.
METRON-1516 Support for Ansible 2.5.0 (ottobackwards) closes apache/metron#989


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/ea6992fd
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/ea6992fd
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/ea6992fd

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: ea6992fd943c552c41565c7a320804cb58b733bd
Parents: 62d1a1b
Author: ottobackwards <ot...@gmail.com>
Authored: Thu Apr 12 06:15:38 2018 -0400
Committer: otto <ot...@apache.org>
Committed: Thu Apr 12 06:15:38 2018 -0400

----------------------------------------------------------------------
 metron-deployment/amazon-ec2/README.md                      | 2 +-
 metron-deployment/amazon-ec2/playbook.yml                   | 4 ++--
 metron-deployment/ansible/playbooks/metron_full_install.yml | 4 ++--
 metron-deployment/development/centos6/README.md             | 2 +-
 metron-deployment/development/ubuntu14/README.md            | 2 +-
 5 files changed, 7 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/ea6992fd/metron-deployment/amazon-ec2/README.md
----------------------------------------------------------------------
diff --git a/metron-deployment/amazon-ec2/README.md b/metron-deployment/amazon-ec2/README.md
index bc259ec..73a3d70 100644
--- a/metron-deployment/amazon-ec2/README.md
+++ b/metron-deployment/amazon-ec2/README.md
@@ -38,7 +38,7 @@ Getting Started
 
 The host used to deploy Apache Metron will need the following software tools installed.  The following versions are known to work as of the time of this writing, but by no means are these the only working versions.
 
-  - Ansible 2.0.0.2 or 2.2.2.0
+  - Ansible 2.0.0.2, 2.2.2.0, or 2.5.0
   - Python 2.7.11
   - Maven 3.3.9  
 

http://git-wip-us.apache.org/repos/asf/metron/blob/ea6992fd/metron-deployment/amazon-ec2/playbook.yml
----------------------------------------------------------------------
diff --git a/metron-deployment/amazon-ec2/playbook.yml b/metron-deployment/amazon-ec2/playbook.yml
index de64490..470a181 100644
--- a/metron-deployment/amazon-ec2/playbook.yml
+++ b/metron-deployment/amazon-ec2/playbook.yml
@@ -23,8 +23,8 @@
     - conf/defaults.yml
   pre_tasks:
     - name: Verify Ansible Version
-      fail: msg="Metron Requires Ansible 2.0.0.2 or 2.2.2.0, current version is {{ ansible_version }}"
-      when: "ansible_version.full | version_compare('2.2.2.0', '!=') and ansible_version.full | version_compare('2.0.0.2', '!=')"
+      fail: msg="Metron Requires Ansible 2.0.0.2, 2.2.2.0 or 2.5.0, current version is {{ ansible_version }}"
+      when: "ansible_version.full | version_compare('2.2.2.0', '!=') and ansible_version.full | version_compare('2.0.0.2', '!=') and ansible_version.full | version_compare('2.5.0', '!=')"
   tasks:
     - include: tasks/create-keypair.yml
     - include: tasks/create-vpc.yml

http://git-wip-us.apache.org/repos/asf/metron/blob/ea6992fd/metron-deployment/ansible/playbooks/metron_full_install.yml
----------------------------------------------------------------------
diff --git a/metron-deployment/ansible/playbooks/metron_full_install.yml b/metron-deployment/ansible/playbooks/metron_full_install.yml
index b517671..099d810 100644
--- a/metron-deployment/ansible/playbooks/metron_full_install.yml
+++ b/metron-deployment/ansible/playbooks/metron_full_install.yml
@@ -18,8 +18,8 @@
 - hosts: all
   pre_tasks:
     - name: Verify Ansible Version
-      fail: msg="Metron Requires Ansible 2.0.0.2 or 2.2.2.0, current version is {{ ansible_version }}"
-      when: "ansible_version.full | version_compare('2.2.2.0', '!=') and ansible_version.full | version_compare('2.0.0.2', '!=')"
+      fail: msg="Metron Requires Ansible 2.0.0.2, 2.2.2.0, or 2.5.0, current version is {{ ansible_version }}"
+      when: "ansible_version.full | version_compare('2.2.2.0', '!=') and ansible_version.full | version_compare('2.0.0.2', '!=') and ansible_version.full | version_compare('2.5.0', '!=')"
 
 - include: metron_build.yml
   tags:

http://git-wip-us.apache.org/repos/asf/metron/blob/ea6992fd/metron-deployment/development/centos6/README.md
----------------------------------------------------------------------
diff --git a/metron-deployment/development/centos6/README.md b/metron-deployment/development/centos6/README.md
index bd8553c..ec85be3 100644
--- a/metron-deployment/development/centos6/README.md
+++ b/metron-deployment/development/centos6/README.md
@@ -29,7 +29,7 @@ Getting Started
 
 The computer used to deploy Apache Metron will need to have the following components installed.
 
- - [Ansible](https://github.com/ansible/ansible) (2.0.0.2 or 2.2.2.0)
+ - [Ansible](https://github.com/ansible/ansible) (2.0.0.2, 2.2.2.0, or 2.5.0)
  - [Docker](https://www.docker.com/community-edition)
  - [Vagrant](https://www.vagrantup.com) 2.0+
  - [Vagrant Hostmanager Plugin](https://github.com/devopsgroup-io/vagrant-hostmanager)

http://git-wip-us.apache.org/repos/asf/metron/blob/ea6992fd/metron-deployment/development/ubuntu14/README.md
----------------------------------------------------------------------
diff --git a/metron-deployment/development/ubuntu14/README.md b/metron-deployment/development/ubuntu14/README.md
index 5856911..fbbd2ad 100644
--- a/metron-deployment/development/ubuntu14/README.md
+++ b/metron-deployment/development/ubuntu14/README.md
@@ -29,7 +29,7 @@ Getting Started
 
 The computer used to deploy Apache Metron will need to have the following components installed.
 
- - [Ansible](https://github.com/ansible/ansible) (2.0.0.2 or 2.2.2.0)
+ - [Ansible](https://github.com/ansible/ansible) (2.0.0.2, 2.2.2.0, or 2.5.0)
  - [Docker](https://www.docker.com/community-edition)
  - [Vagrant](https://www.vagrantup.com) 2.0+
  - [Vagrant Hostmanager Plugin](https://github.com/devopsgroup-io/vagrant-hostmanager)


[14/50] [abbrv] metron git commit: METRON-1491: The indexing topology restart logic is wrong (cstella via mmiklavc) closes apache/metron#964

Posted by rm...@apache.org.
METRON-1491: The indexing topology restart logic is wrong (cstella via mmiklavc) closes apache/metron#964
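
The bug: restart_indexing_topology stopped both topologies at once, its retry loop never refreshed the topology_active flag tested after the loop, and the per-topology start methods consulted the combined is_topology_active check. The fix splits the restart into separate batch and random-access paths, each re-polling its own topology's state on every retry.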


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/5ed9631a
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/5ed9631a
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/5ed9631a

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 5ed9631a2936ec60d0ea6557ca4396cffdadc688
Parents: 3083b47
Author: cstella <ce...@gmail.com>
Authored: Tue Mar 20 16:08:02 2018 -0600
Committer: Michael Miklavcic <mi...@gmail.com>
Committed: Tue Mar 20 16:08:02 2018 -0600

----------------------------------------------------------------------
 .../package/scripts/indexing_commands.py        | 43 ++++++++++++++++----
 1 file changed, 35 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/5ed9631a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_commands.py
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_commands.py b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_commands.py
index 4c862f0..fd78119 100755
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_commands.py
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/indexing_commands.py
@@ -181,7 +181,7 @@ class IndexingCommands:
     def start_batch_indexing_topology(self, env):
         Logger.info('Starting ' + self.__batch_indexing_topology)
 
-        if not self.is_topology_active(env):
+        if not self.is_batch_topology_active(env):
             if self.__params.security_enabled:
                 metron_security.kinit(self.__params.kinit_path_local,
                                       self.__params.metron_keytab_path,
@@ -200,7 +200,7 @@ class IndexingCommands:
     def start_random_access_indexing_topology(self, env):
         Logger.info('Starting ' + self.__random_access_indexing_topology)
 
-        if not self.is_topology_active(env):
+        if not self.is_random_access_topology_active(env):
             if self.__params.security_enabled:
                 metron_security.kinit(self.__params.kinit_path_local,
                                       self.__params.metron_keytab_path,
@@ -263,21 +263,48 @@ class IndexingCommands:
 
     def restart_indexing_topology(self, env):
         Logger.info('Restarting the indexing topologies')
-        self.stop_indexing_topology(env)
+        self.restart_batch_indexing_topology(env)
+        self.restart_random_access_indexing_topology(env)
+
+    def restart_batch_indexing_topology(self, env):
+        Logger.info('Restarting the batch indexing topology')
+        self.stop_batch_indexing_topology(env)
+
+        # Wait for old topology to be cleaned up by Storm, before starting again.
+        retries = 0
+        topology_active = self.is_batch_topology_active(env)
+        while topology_active and retries < 3:
+            Logger.info('Existing batch topology still active. Will wait and retry')
+            time.sleep(10)
+            retries += 1
+            topology_active = self.is_batch_topology_active(env)
+
+        if not topology_active:
+            Logger.info('Waiting for storm kill to complete')
+            time.sleep(30)
+            self.start_batch_indexing_topology(env)
+            Logger.info('Done restarting the batch indexing topology')
+        else:
+            Logger.warning('Retries exhausted. Existing topology not cleaned up.  Aborting topology start.')
+
+    def restart_random_access_indexing_topology(self, env):
+        Logger.info('Restarting the random access indexing topology')
+        self.stop_random_access_indexing_topology(env)
 
         # Wait for old topology to be cleaned up by Storm, before starting again.
         retries = 0
-        topology_active = self.is_topology_active(env)
-        while self.is_topology_active(env) and retries < 3:
-            Logger.info('Existing topology still active. Will wait and retry')
+        topology_active = self.is_random_access_topology_active(env)
+        while topology_active and retries < 3:
+            Logger.info('Existing random access topology still active. Will wait and retry')
             time.sleep(10)
             retries += 1
+            topology_active = self.is_random_access_topology_active(env)
 
         if not topology_active:
             Logger.info('Waiting for storm kill to complete')
             time.sleep(30)
-            self.start_indexing_topology(env)
-            Logger.info('Done restarting the indexing topologies')
+            self.start_random_access_indexing_topology(env)
+            Logger.info('Done restarting the random access indexing topology')
         else:
             Logger.warning('Retries exhausted. Existing topology not cleaned up.  Aborting topology start.')
 


[27/50] [abbrv] metron git commit: METRON-1504: Enriching missing values does not match the semantics between the new enrichment topology and old closes apache/incubator-metron#976

Posted by rm...@apache.org.
METRON-1504: Enriching missing values does not match the semantics between the new enrichment topology and old closes apache/incubator-metron#976
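
In short, the new parallel enricher attempted to enrich fields whose values were null, where the old split/join topology did not. The null check added below restores the old semantics, and the integration test now feeds in a message without a destination IP to cover the case.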


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/3ba9ae25
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/3ba9ae25
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/3ba9ae25

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 3ba9ae25126bc393e3b9307f4ffe63ac66a909f4
Parents: 46bc63d
Author: cstella <ce...@gmail.com>
Authored: Mon Apr 9 11:47:51 2018 -0400
Committer: cstella <ce...@gmail.com>
Committed: Mon Apr 9 11:47:51 2018 -0400

----------------------------------------------------------------------
 .../enrichment/parallel/ParallelEnricher.java   |  3 +
 .../integration/EnrichmentIntegrationTest.java  | 12 +++-
 .../parallel/ParallelEnricherTest.java          | 59 +++++++++++++++++++-
 3 files changed, 70 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/3ba9ae25/metron-platform/metron-enrichment/src/main/java/org/apache/metron/enrichment/parallel/ParallelEnricher.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-enrichment/src/main/java/org/apache/metron/enrichment/parallel/ParallelEnricher.java b/metron-platform/metron-enrichment/src/main/java/org/apache/metron/enrichment/parallel/ParallelEnricher.java
index 6ddb892..b10c148 100644
--- a/metron-platform/metron-enrichment/src/main/java/org/apache/metron/enrichment/parallel/ParallelEnricher.java
+++ b/metron-platform/metron-enrichment/src/main/java/org/apache/metron/enrichment/parallel/ParallelEnricher.java
@@ -170,6 +170,9 @@ public class ParallelEnricher {
         for(Object o : m.keySet()) {
           String field = (String) o;
           Object value = m.get(o);
+          if(value == null) {
+            continue;
+          }
           CacheKey cacheKey = new CacheKey(field, value, config);
           String prefix = adapter.getOutputPrefix(cacheKey);
           Supplier<JSONObject> supplier = () -> {

http://git-wip-us.apache.org/repos/asf/metron/blob/3ba9ae25/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/integration/EnrichmentIntegrationTest.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/integration/EnrichmentIntegrationTest.java b/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/integration/EnrichmentIntegrationTest.java
index 267ca62..3c55c95 100644
--- a/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/integration/EnrichmentIntegrationTest.java
+++ b/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/integration/EnrichmentIntegrationTest.java
@@ -95,7 +95,15 @@ public class EnrichmentIntegrationTest extends BaseIntegrationTest {
 
   private static List<byte[]> getInputMessages(String path){
     try{
-      return TestUtils.readSampleData(path);
+      List<byte[]> ret = TestUtils.readSampleData(path);
+      {
+        //we want one of the messages to lack a destination IP to ensure that enrichments can still function
+        Map<String, Object> sansDestinationIp = JSONUtils.INSTANCE.load(new String(ret.get(ret.size() - 1))
+                                                                       , JSONUtils.MAP_SUPPLIER);
+        sansDestinationIp.remove(Constants.Fields.DST_ADDR.getName());
+        ret.add(JSONUtils.INSTANCE.toJSONPretty(sansDestinationIp));
+      }
+      return ret;
     }catch(IOException ioe){
       return null;
     }
@@ -262,8 +270,6 @@ public class EnrichmentIntegrationTest extends BaseIntegrationTest {
 
     //ensure we always have a source ip and destination ip
     Assert.assertNotNull(jsonDoc.get(SRC_IP));
-    Assert.assertNotNull(jsonDoc.get(DST_IP));
-
     Assert.assertNotNull(jsonDoc.get("ALL_CAPS"));
     Assert.assertNotNull(jsonDoc.get("map.blah"));
     Assert.assertNull(jsonDoc.get("map"));

http://git-wip-us.apache.org/repos/asf/metron/blob/3ba9ae25/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/parallel/ParallelEnricherTest.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/parallel/ParallelEnricherTest.java b/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/parallel/ParallelEnricherTest.java
index 4a4573b..d4fcdf4 100644
--- a/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/parallel/ParallelEnricherTest.java
+++ b/metron-platform/metron-enrichment/src/test/java/org/apache/metron/enrichment/parallel/ParallelEnricherTest.java
@@ -79,7 +79,39 @@ public class ParallelEnricherTest {
       }
     }.ofType("ENRICHMENT");
     adapter.initializeAdapter(new HashMap<>());
-    enrichmentsByType = ImmutableMap.of("stellar", adapter);
+    EnrichmentAdapter<CacheKey> dummy = new EnrichmentAdapter<CacheKey>() {
+      @Override
+      public void logAccess(CacheKey value) {
+
+      }
+
+      @Override
+      public JSONObject enrich(CacheKey value) {
+        return null;
+      }
+
+      @Override
+      public boolean initializeAdapter(Map<String, Object> config) {
+        return false;
+      }
+
+      @Override
+      public void updateAdapter(Map<String, Object> config) {
+
+      }
+
+      @Override
+      public void cleanup() {
+
+      }
+
+      @Override
+      public String getOutputPrefix(CacheKey value) {
+        return null;
+      }
+    };
+
+    enrichmentsByType = ImmutableMap.of("stellar", adapter, "dummy", dummy);
     enricher = new ParallelEnricher(enrichmentsByType, infrastructure, false);
   }
 
@@ -115,6 +147,31 @@ public class ParallelEnricherTest {
     Assert.assertEquals("TEST", ret.get("ALL_CAPS"));
     Assert.assertEquals(0, result.getEnrichmentErrors().size());
   }
+  /**
+   * {
+  "enrichment": {
+    "fieldMap": {
+      "dummy" : ["notthere"]
+    }
+  ,"fieldToTypeMap": { }
+  },
+  "threatIntel": { }
+}
+   */
+  @Multiline
+  public static String nullConfig;
+
+  @Test
+  public void testNullEnrichment() throws Exception {
+    SensorEnrichmentConfig config = JSONUtils.INSTANCE.load(nullConfig, SensorEnrichmentConfig.class);
+    config.getConfiguration().putIfAbsent("stellarContext", stellarContext);
+    JSONObject message = new JSONObject() {{
+      put(Constants.SENSOR_TYPE, "test");
+    }};
+    ParallelEnricher.EnrichmentResult result = enricher.apply(message, EnrichmentStrategies.ENRICHMENT, config, null);
+    JSONObject ret = result.getResult();
+    Assert.assertEquals("Got the wrong result count: " + ret, 4, ret.size());
+  }
 
   /**
    * {

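The `dummy` adapter above returns null from both enrich() and getOutputPrefix(), and the test asserts that no enrichment errors are recorded, which implies the enricher tolerates a null enrichment result. A defensive pattern consistent with that expectation (a sketch under that assumption, not the actual ParallelEnricher code):

```java
import org.json.simple.JSONObject;

public class NullSafeEnrichExample {
  // hypothetical helper: treat a null enrichment result as "no fields added"
  static JSONObject orEmpty(JSONObject enriched) {
    return enriched == null ? new JSONObject() : enriched;
  }

  public static void main(String[] args) {
    System.out.println(orEmpty(null));  // prints {}
  }
}
```
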

[29/50] [abbrv] metron git commit: METRON-1518 Build Failure When Using Profile HDP-2.5.0.0 (nickwallen) closes apache/metron#986

Posted by rm...@apache.org.
METRON-1518 Build Failure When Using Profile HDP-2.5.0.0 (nickwallen) closes apache/metron#986


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/ed50d48b
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/ed50d48b
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/ed50d48b

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: ed50d48bb47cf3f301884f6e18fe4efc8c1b91f1
Parents: a8b555d
Author: nickwallen <ni...@nickallen.org>
Authored: Tue Apr 10 17:16:20 2018 -0400
Committer: nickallen <ni...@apache.org>
Committed: Tue Apr 10 17:16:20 2018 -0400

----------------------------------------------------------------------
 .../profiler/bolt/ProfileBuilderBolt.java       | 51 +++++---------------
 1 file changed, 11 insertions(+), 40 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/ed50d48b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileBuilderBolt.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileBuilderBolt.java b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileBuilderBolt.java
index fb3d2d0..ca02b58 100644
--- a/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileBuilderBolt.java
+++ b/metron-analytics/metron-profiler/src/main/java/org/apache/metron/profiler/bolt/ProfileBuilderBolt.java
@@ -42,13 +42,11 @@ import org.apache.metron.stellar.common.utils.ConversionUtils;
 import org.apache.metron.stellar.dsl.Context;
 import org.apache.metron.zookeeper.SimpleEventListener;
 import org.apache.metron.zookeeper.ZKCache;
-import org.apache.storm.StormTimer;
 import org.apache.storm.task.OutputCollector;
 import org.apache.storm.task.TopologyContext;
 import org.apache.storm.topology.OutputFieldsDeclarer;
 import org.apache.storm.topology.base.BaseWindowedBolt;
 import org.apache.storm.tuple.Tuple;
-import org.apache.storm.utils.Utils;
 import org.apache.storm.windowing.TupleWindow;
 import org.json.simple.JSONObject;
 import org.json.simple.parser.JSONParser;
@@ -59,9 +57,9 @@ import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
-import java.util.function.Function;
-import java.util.function.Supplier;
 
 import static java.lang.String.format;
 import static org.apache.metron.profiler.bolt.ProfileSplitterBolt.ENTITY_TUPLE_FIELD;
@@ -155,8 +153,8 @@ public class ProfileBuilderBolt extends BaseWindowedBolt implements Reloadable {
   private FlushSignal activeFlushSignal;
 
   /**
-   * A timer that flushes expired profiles on a regular interval. The expired profiles
-   * are flushed on a separate thread.
+   * An executor that flushes expired profiles at a regular interval on a separate
+   * thread.
    *
    * <p>Flushing expired profiles ensures that any profiles that stop receiving messages
    * for an extended period of time will continue to be flushed.
@@ -164,7 +162,7 @@ public class ProfileBuilderBolt extends BaseWindowedBolt implements Reloadable {
    * <p>This introduces concurrency issues as the bolt is no longer single threaded. Due
    * to this, all access to the {@code MessageDistributor} needs to be protected.
    */
-  private StormTimer expiredFlushTimer;
+  private transient ScheduledExecutorService flushExpiredExecutor;
 
   public ProfileBuilderBolt() {
     this.emitters = new ArrayList<>();
@@ -202,7 +200,7 @@ public class ProfileBuilderBolt extends BaseWindowedBolt implements Reloadable {
     this.configurations = new ProfilerConfigurations();
     this.activeFlushSignal = new FixedFrequencyFlushSignal(periodDurationMillis);
     setupZookeeper();
-    startExpiredFlushTimer();
+    startFlushingExpiredProfiles();
   }
 
   @Override
@@ -210,7 +208,7 @@ public class ProfileBuilderBolt extends BaseWindowedBolt implements Reloadable {
     try {
       zookeeperCache.close();
       zookeeperClient.close();
-      expiredFlushTimer.close();
+      flushExpiredExecutor.shutdown();
 
     } catch(Throwable e) {
       LOG.error("Exception when cleaning up", e);
@@ -421,39 +419,12 @@ public class ProfileBuilderBolt extends BaseWindowedBolt implements Reloadable {
   }
 
   /**
-   * Converts milliseconds to seconds and handles an ugly cast.
-   *
-   * @param millis Duration in milliseconds.
-   * @return Duration in seconds.
-   */
-  private int toSeconds(long millis) {
-    return (int) TimeUnit.MILLISECONDS.toSeconds(millis);
-  }
-
-  /**
-   * Creates a timer that regularly flushes expired profiles on a separate thread.
-   */
-  private void startExpiredFlushTimer() {
-
-    expiredFlushTimer = createTimer("flush-expired-profiles-timer");
-    expiredFlushTimer.scheduleRecurring(0, toSeconds(profileTimeToLiveMillis), () -> flushExpired());
-  }
-
-  /**
-   * Creates a timer that can execute a task on a fixed interval.
-   *
-   * <p>If the timer encounters an exception, the entire process will be killed.
-   *
-   * @param name The name of the timer.
-   * @return The timer.
+   * Creates a separate thread that regularly flushes expired profiles.
    */
-  private StormTimer createTimer(String name) {
+  private void startFlushingExpiredProfiles() {
 
-    return new StormTimer(name, (thread, exception) -> {
-      String msg = String.format("Unexpected exception in timer task; timer=%s", name);
-      LOG.error(msg, exception);
-      Utils.exitProcess(1, msg);
-    });
+    flushExpiredExecutor = Executors.newSingleThreadScheduledExecutor();
+    flushExpiredExecutor.scheduleAtFixedRate(() -> flushExpired(), 0, profileTimeToLiveMillis, TimeUnit.MILLISECONDS);
   }
 
   @Override

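One behavioral difference worth noting: the removed StormTimer killed the entire process if a timer task threw, whereas ScheduledExecutorService silently cancels all future runs when a scheduleAtFixedRate task throws. A minimal sketch of the new pattern with a defensive try/catch added (the guard is an assumption about desired behavior, not part of this commit):

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class FlusherExample {

  private final ScheduledExecutorService executor =
          Executors.newSingleThreadScheduledExecutor();

  public void start(long ttlMillis) {
    // scheduleAtFixedRate suppresses all subsequent runs if the task throws,
    // so the task body is wrapped defensively
    executor.scheduleAtFixedRate(() -> {
      try {
        flushExpired();
      } catch (Throwable t) {
        t.printStackTrace();  // log and keep the schedule alive
      }
    }, 0, ttlMillis, TimeUnit.MILLISECONDS);
  }

  public void stop() {
    executor.shutdown();
  }

  private void flushExpired() {
    System.out.println("flushing expired profiles");
  }

  public static void main(String[] args) throws InterruptedException {
    FlusherExample f = new FlusherExample();
    f.start(500);
    Thread.sleep(1600);  // expect roughly 3-4 flushes
    f.stop();
  }
}
```
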

[48/50] [abbrv] metron git commit: METRON-1539: Specialized RENAME field transformer closes apache/incubator-metron#1002

Posted by rm...@apache.org.
METRON-1539: Specialized RENAME field transformer closes apache/incubator-metron#1002


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/2b4f0b84
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/2b4f0b84
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/2b4f0b84

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: 2b4f0b84062d65f9400421d66ec3b7d6d093bebf
Parents: 1c5435c
Author: cstella <ce...@gmail.com>
Authored: Wed Apr 25 11:49:56 2018 -0400
Committer: cstella <ce...@gmail.com>
Committed: Wed Apr 25 11:49:56 2018 -0400

----------------------------------------------------------------------
 .../common/configuration/FieldTransformer.java  |  4 +-
 .../transformation/FieldTransformations.java    |  1 +
 .../transformation/RenameTransformation.java    | 55 +++++++++++
 .../transformation/FieldTransformationTest.java | 17 +---
 .../RenameTransformationTest.java               | 99 ++++++++++++++++++++
 metron-platform/metron-parsers/README.md        | 25 ++++-
 6 files changed, 183 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/2b4f0b84/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/FieldTransformer.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/FieldTransformer.java b/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/FieldTransformer.java
index df80691..43ce9d8 100644
--- a/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/FieldTransformer.java
+++ b/metron-platform/metron-common/src/main/java/org/apache/metron/common/configuration/FieldTransformer.java
@@ -94,7 +94,9 @@ public class FieldTransformer implements Serializable {
 
       if (output == null || output.isEmpty()) {
         if (input == null || input.isEmpty()) {
-          throw new IllegalStateException("You must specify an input field if you want to leave the output fields empty");
+          //both are empty, so let's set them both to null
+          output = null;
+          input = null;
         } else {
           output = input;
         }

http://git-wip-us.apache.org/repos/asf/metron/blob/2b4f0b84/metron-platform/metron-common/src/main/java/org/apache/metron/common/field/transformation/FieldTransformations.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/field/transformation/FieldTransformations.java b/metron-platform/metron-common/src/main/java/org/apache/metron/common/field/transformation/FieldTransformations.java
index a905123..95ff390 100644
--- a/metron-platform/metron-common/src/main/java/org/apache/metron/common/field/transformation/FieldTransformations.java
+++ b/metron-platform/metron-common/src/main/java/org/apache/metron/common/field/transformation/FieldTransformations.java
@@ -25,6 +25,7 @@ public enum FieldTransformations {
   ,REMOVE(new RemoveTransformation())
   ,STELLAR(new StellarTransformation())
   ,SELECT(new SelectTransformation())
+  ,RENAME(new RenameTransformation())
   ;
   FieldTransformation mapping;
   FieldTransformations(FieldTransformation mapping) {

http://git-wip-us.apache.org/repos/asf/metron/blob/2b4f0b84/metron-platform/metron-common/src/main/java/org/apache/metron/common/field/transformation/RenameTransformation.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/main/java/org/apache/metron/common/field/transformation/RenameTransformation.java b/metron-platform/metron-common/src/main/java/org/apache/metron/common/field/transformation/RenameTransformation.java
new file mode 100644
index 0000000..f8b9374
--- /dev/null
+++ b/metron-platform/metron-common/src/main/java/org/apache/metron/common/field/transformation/RenameTransformation.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.metron.common.field.transformation;
+
+import org.apache.metron.stellar.dsl.Context;
+
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+public class RenameTransformation implements FieldTransformation {
+  @Override
+  public Map<String, Object> map( Map<String, Object> input
+                                , List<String> outputField
+                                , LinkedHashMap<String, Object> fieldMappingConfig
+                                , Context context
+                                , Map<String, Object>... sensorConfig
+                                )
+  {
+    if(fieldMappingConfig == null || fieldMappingConfig.isEmpty()) {
+      return input;
+    }
+    Map<String, Object> ret = new HashMap<>();
+    for(Map.Entry<String, Object> kv : input.entrySet()) {
+      Object renamed = fieldMappingConfig.get(kv.getKey());
+      if(renamed != null) {
+        //if we're renaming, then we want to copy the field to the new name
+        ret.put(renamed.toString(), kv.getValue());
+        //and remove the old field
+        ret.put(kv.getKey(), null);
+      }
+      else {
+        ret.put(kv.getKey(), kv.getValue());
+      }
+    }
+    return ret;
+  }
+}

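The subtle part of map() above is `ret.put(kv.getKey(), null)`: the old field is not dropped directly but mapped to null, relying on the parser's "assignment to null" convention (documented in the README change below) to remove it downstream. A self-contained sketch of the same logic on a plain map (the field names here are illustrative):

```java
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

public class RenameExample {
  public static void main(String[] args) {
    Map<String, Object> input = new HashMap<>();
    input.put("old_field", "value");
    input.put("untouched", "asis");

    LinkedHashMap<String, Object> config = new LinkedHashMap<>();
    config.put("old_field", "new_field");

    Map<String, Object> ret = new HashMap<>();
    for (Map.Entry<String, Object> kv : input.entrySet()) {
      Object renamed = config.get(kv.getKey());
      if (renamed != null) {
        ret.put(renamed.toString(), kv.getValue()); // copy under the new name
        ret.put(kv.getKey(), null);                 // null marks the old name for removal
      } else {
        ret.put(kv.getKey(), kv.getValue());
      }
    }
    // prints {new_field=value, old_field=null, untouched=asis};
    // downstream, the null-valued old_field is dropped from the message
    System.out.println(ret);
  }
}
```
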
http://git-wip-us.apache.org/repos/asf/metron/blob/2b4f0b84/metron-platform/metron-common/src/test/java/org/apache/metron/common/field/transformation/FieldTransformationTest.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/test/java/org/apache/metron/common/field/transformation/FieldTransformationTest.java b/metron-platform/metron-common/src/test/java/org/apache/metron/common/field/transformation/FieldTransformationTest.java
index 71a0298..b7557e8 100644
--- a/metron-platform/metron-common/src/test/java/org/apache/metron/common/field/transformation/FieldTransformationTest.java
+++ b/metron-platform/metron-common/src/test/java/org/apache/metron/common/field/transformation/FieldTransformationTest.java
@@ -85,18 +85,6 @@ public class FieldTransformationTest {
    {
     "fieldTransformations" : [
           {
-           "transformation" : "IP_PROTOCOL"
-          }
-                      ]
-   }
-   */
-  @Multiline
-  public static String badConfigMissingInput;
-
-  /**
-   {
-    "fieldTransformations" : [
-          {
             "input" : "protocol"
           }
                       ]
@@ -113,10 +101,7 @@ public class FieldTransformationTest {
     Assert.assertEquals(ImmutableList.of("protocol"), c.getFieldTransformations().get(0).getInput());
   }
 
-  @Test(expected = IllegalStateException.class)
-  public void testInValidSerde_missingInput() throws IOException {
-    SensorParserConfig.fromBytes(Bytes.toBytes(badConfigMissingInput));
-  }
+
 
   @Test(expected = IllegalStateException.class)
   public void testInValidSerde_missingMapping() throws IOException {

http://git-wip-us.apache.org/repos/asf/metron/blob/2b4f0b84/metron-platform/metron-common/src/test/java/org/apache/metron/common/field/transformation/RenameTransformationTest.java
----------------------------------------------------------------------
diff --git a/metron-platform/metron-common/src/test/java/org/apache/metron/common/field/transformation/RenameTransformationTest.java b/metron-platform/metron-common/src/test/java/org/apache/metron/common/field/transformation/RenameTransformationTest.java
new file mode 100644
index 0000000..cacc818
--- /dev/null
+++ b/metron-platform/metron-common/src/test/java/org/apache/metron/common/field/transformation/RenameTransformationTest.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.metron.common.field.transformation;
+
+import com.google.common.collect.Iterables;
+import org.adrianwalker.multilinestring.Multiline;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.metron.common.configuration.FieldTransformer;
+import org.apache.metron.common.configuration.SensorParserConfig;
+import org.apache.metron.stellar.dsl.Context;
+import org.json.simple.JSONObject;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.HashMap;
+
+public class RenameTransformationTest {
+  /**
+   {
+    "fieldTransformations" : [
+          {
+            "transformation" : "RENAME",
+            "config" : {
+              "old_field1" : "new_field1",
+              "old_field2" : "new_field2"
+                      }
+          }
+                             ]
+   }
+   */
+  @Multiline
+  public static String smoketestConfig;
+
+  @Test
+  public void smokeTest() throws Exception {
+    SensorParserConfig c = SensorParserConfig.fromBytes(Bytes.toBytes(smoketestConfig));
+    FieldTransformer handler = Iterables.getFirst(c.getFieldTransformations(), null);
+    JSONObject input = new JSONObject(new HashMap<String, Object>() {{
+      for(int i = 1;i <= 10;++i) {
+        put("old_field" + i, "f" + i);
+      }
+    }});
+    handler.transformAndUpdate(input, Context.EMPTY_CONTEXT());
+    Assert.assertEquals("f1", input.get("new_field1"));
+    Assert.assertEquals("f2", input.get("new_field2"));
+    for(int i = 3;i <= 10;++i) {
+      Assert.assertEquals("f" + i, input.get("old_field" + i));
+    }
+    Assert.assertFalse(input.containsKey("old_field1"));
+    Assert.assertFalse(input.containsKey("old_field2"));
+    Assert.assertEquals(10, input.size());
+  }
+
+  /**
+   {
+    "fieldTransformations" : [
+          {
+            "transformation" : "RENAME",
+            "config" : {
+              "old_field1" : "new_field1"
+                      }
+          }
+                             ]
+   }
+   */
+  @Multiline
+  public static String renameMissingField;
+  @Test
+  public void renameMissingField() throws Exception {
+    SensorParserConfig c = SensorParserConfig.fromBytes(Bytes.toBytes(renameMissingField));
+    FieldTransformer handler = Iterables.getFirst(c.getFieldTransformations(), null);
+    JSONObject input = new JSONObject(new HashMap<String, Object>() {{
+      for(int i = 2;i <= 10;++i) {
+        put("old_field" + i, "f" + i);
+      }
+    }});
+    handler.transformAndUpdate(input, Context.EMPTY_CONTEXT());
+    Assert.assertFalse(input.containsKey("new_field1"));
+    for(int i = 2;i <= 10;++i) {
+      Assert.assertEquals("f" + i, input.get("old_field" + i));
+    }
+    Assert.assertEquals(9, input.size());
+  }
+}

http://git-wip-us.apache.org/repos/asf/metron/blob/2b4f0b84/metron-platform/metron-parsers/README.md
----------------------------------------------------------------------
diff --git a/metron-platform/metron-parsers/README.md b/metron-platform/metron-parsers/README.md
index 1d2d834..e8b2896 100644
--- a/metron-platform/metron-parsers/README.md
+++ b/metron-platform/metron-parsers/README.md
@@ -313,10 +313,33 @@ into `{ "protocol" : "TCP", "source.type" : "bro", ...}`
 * `STELLAR` : This transformation executes a set of transformations
   expressed as [Stellar Language](../metron-common) statements.
 
+* `RENAME` : This transformation renames a set of fields.  The config itself is the
+mapping: each key is an existing field name and the associated value is the new
+field name.
+
+The following config will rename the fields `old_field` and `different_old_field` to
+`new_field` and `different_new_field` respectively:
+```
+{
+...
+    "fieldTransformations" : [
+          {
+            "transformation" : "RENAME",
+          , "config" : {
+            "old_field" : "new_field",
+            "different_old_field" : "different_new_field"
+                       }
+          }
+                      ]
+}
+```
+
+
 ### Assignment to `null`
 
 If, in your field transformation, you assign a field to `null`, the field will be removed.
-You can use this capability to rename variables.
+You can use this capability to rename variables.  The `RENAME` field transformation is
+preferred for this purpose, however, as it is less awkward.
 
 Consider this example:
 ```


[11/50] [abbrv] metron git commit: METRON-590 Enable Use of Event Time in Profiler (nickwallen) closes apache/metron#965

Posted by rm...@apache.org.
http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/FixedFrequencyFlushSignalTest.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/FixedFrequencyFlushSignalTest.java b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/FixedFrequencyFlushSignalTest.java
new file mode 100644
index 0000000..b8949c5
--- /dev/null
+++ b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/FixedFrequencyFlushSignalTest.java
@@ -0,0 +1,71 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.metron.profiler.bolt;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Tests the {@code FixedFrequencyFlushSignal} class.
+ */
+public class FixedFrequencyFlushSignalTest {
+
+  @Test
+  public void testSignalFlush() {
+
+    FixedFrequencyFlushSignal signal = new FixedFrequencyFlushSignal(1000);
+
+    // not time to flush yet
+    assertFalse(signal.isTimeToFlush());
+
+    // advance time
+    signal.update(5000);
+
+    // not time to flush yet
+    assertFalse(signal.isTimeToFlush());
+
+    // advance time
+    signal.update(7000);
+
+    // time to flush
+    assertTrue(signal.isTimeToFlush());
+  }
+
+  @Test
+  public void testOutOfOrderTimestamps() {
+    FixedFrequencyFlushSignal signal = new FixedFrequencyFlushSignal(1000);
+
+    // advance time, out-of-order
+    signal.update(5000);
+    signal.update(1000);
+    signal.update(7000);
+    signal.update(3000);
+
+    // need to flush @ 5000 + 1000 = 6000. if anything > 6000 (even out-of-order), then it should signal a flush
+    assertTrue(signal.isTimeToFlush());
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testNegativeFrequency() {
+    new FixedFrequencyFlushSignal(-1000);
+  }
+}

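The production FixedFrequencyFlushSignal class itself is not shown in this digest, but the tests pin down its contract: the first timestamp seen schedules the next flush, later timestamps only ever advance the clock (so out-of-order updates are safe), and a negative frequency is rejected. One minimal implementation consistent with these assertions (an inference from the tests, not the committed code; reset-after-flush behavior is not covered here):

```java
public class FixedFrequencyFlushSignalSketch {

  private final long flushFrequencyMillis;
  private long flushTimeMillis = Long.MAX_VALUE; // set on the first update
  private long currentTimeMillis = 0;

  public FixedFrequencyFlushSignalSketch(long flushFrequencyMillis) {
    if (flushFrequencyMillis < 0) {
      throw new IllegalArgumentException("flush frequency must be >= 0");
    }
    this.flushFrequencyMillis = flushFrequencyMillis;
  }

  public void update(long timestampMillis) {
    if (flushTimeMillis == Long.MAX_VALUE) {
      // the first timestamp seen defines when the next flush is due
      flushTimeMillis = timestampMillis + flushFrequencyMillis;
    }
    // out-of-order timestamps only ever advance the clock
    currentTimeMillis = Math.max(currentTimeMillis, timestampMillis);
  }

  public boolean isTimeToFlush() {
    return currentTimeMillis > flushTimeMillis;
  }
}
```

This reproduces the test scenarios: after update(5000) the flush is due at 6000, update(1000) and update(3000) do not move the clock backwards, and update(7000) triggers the flush.
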
http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/KafkaDestinationHandlerTest.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/KafkaDestinationHandlerTest.java b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/KafkaDestinationHandlerTest.java
deleted file mode 100644
index c3f2584..0000000
--- a/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/KafkaDestinationHandlerTest.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- *
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-
-package org.apache.metron.profiler.bolt;
-
-import com.google.common.collect.ImmutableMap;
-import org.adrianwalker.multilinestring.Multiline;
-import org.apache.metron.common.configuration.profiler.ProfileConfig;
-import org.apache.metron.common.utils.JSONUtils;
-import org.apache.metron.profiler.ProfileMeasurement;
-import org.apache.metron.statistics.OnlineStatisticsProvider;
-import org.apache.storm.task.OutputCollector;
-import org.apache.storm.tuple.Values;
-import org.json.simple.JSONObject;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Mockito;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-import static org.mockito.Mockito.*;
-import static org.junit.Assert.*;
-
-/**
- * Tests the KafkaDestinationHandler.
- */
-public class KafkaDestinationHandlerTest {
-
-  /**
-   * {
-   *   "profile": "profile-one-destination",
-   *   "foreach": "ip_src_addr",
-   *   "init":   { "x": "0" },
-   *   "update": { "x": "x + 1" },
-   *   "result": "x"
-   * }
-   */
-  @Multiline
-  private String profileDefinition;
-
-  private KafkaDestinationHandler handler;
-  private ProfileConfig profile;
-  private OutputCollector collector;
-
-  @Before
-  public void setup() throws Exception {
-    handler = new KafkaDestinationHandler();
-    profile = createDefinition(profileDefinition);
-    collector = Mockito.mock(OutputCollector.class);
-  }
-
-  /**
-   * The handler must serialize the ProfileMeasurement into a JSONObject.
-   */
-  @Test
-  public void testSerialization() throws Exception {
-
-    ProfileMeasurement measurement = new ProfileMeasurement()
-            .withProfileName("profile")
-            .withEntity("entity")
-            .withPeriod(20000, 15, TimeUnit.MINUTES)
-            .withTriageValues(Collections.singletonMap("triage-key", "triage-value"))
-            .withDefinition(profile);
-    handler.emit(measurement, collector);
-
-    ArgumentCaptor<Values> arg = ArgumentCaptor.forClass(Values.class);
-    verify(collector, times(1)).emit(eq(handler.getStreamId()), arg.capture());
-
-    // expect a JSONObject
-    Values values = arg.getValue();
-    assertTrue(values.get(0) instanceof JSONObject);
-
-    // validate the json
-    JSONObject actual = (JSONObject) values.get(0);
-    assertEquals(measurement.getDefinition().getProfile(), actual.get("profile"));
-    assertEquals(measurement.getEntity(), actual.get("entity"));
-    assertEquals(measurement.getPeriod().getPeriod(), actual.get("period"));
-    assertEquals(measurement.getPeriod().getStartTimeMillis(), actual.get("period.start"));
-    assertEquals(measurement.getPeriod().getEndTimeMillis(), actual.get("period.end"));
-    assertEquals(measurement.getTriageValues().get("triage-key"), actual.get("triage-key"));
-    assertNotNull(actual.get("timestamp"));
-    assertEquals("profiler", actual.get("source.type"));
-  }
-
-  /**
-   * Values destined for Kafka can only be serialized into text, which limits the types of values
-   * that can result from a triage expression.  Only primitive types and Strings are allowed.
-   */
-  @Test
-  public void testInvalidType() throws Exception {
-
-    // create one invalid expression and one valid expression
-    Map<String, Object> triageValues = ImmutableMap.of(
-            "invalid", new OnlineStatisticsProvider(),
-            "valid", 4);
-
-    ProfileMeasurement measurement = new ProfileMeasurement()
-            .withProfileName("profile")
-            .withEntity("entity")
-            .withPeriod(20000, 15, TimeUnit.MINUTES)
-            .withTriageValues(triageValues)
-            .withDefinition(profile);
-    handler.emit(measurement, collector);
-
-    ArgumentCaptor<Values> arg = ArgumentCaptor.forClass(Values.class);
-    verify(collector, times(1)).emit(eq(handler.getStreamId()), arg.capture());
-    Values values = arg.getValue();
-    assertTrue(values.get(0) instanceof JSONObject);
-
-    // only the triage expression value itself should have been skipped, all others should be there
-    JSONObject actual = (JSONObject) values.get(0);
-    assertEquals(measurement.getDefinition().getProfile(), actual.get("profile"));
-    assertEquals(measurement.getEntity(), actual.get("entity"));
-    assertEquals(measurement.getPeriod().getPeriod(), actual.get("period"));
-    assertEquals(measurement.getPeriod().getStartTimeMillis(), actual.get("period.start"));
-    assertEquals(measurement.getPeriod().getEndTimeMillis(), actual.get("period.end"));
-    assertNotNull(actual.get("timestamp"));
-    assertEquals("profiler", actual.get("source.type"));
-
-    // the invalid expression should be skipped due to invalid type
-    assertFalse(actual.containsKey("invalid"));
-
-    // but the valid expression should still be there
-    assertEquals(triageValues.get("valid"), actual.get("valid"));
-  }
-
-  /**
-   * Values destined for Kafka can only be serialized into text, which limits the types of values
-   * that can result from a triage expression.  Only primitive types and Strings are allowed.
-   */
-  @Test
-  public void testIntegerIsValidType() throws Exception {
-    ProfileMeasurement measurement = new ProfileMeasurement()
-            .withProfileName("profile")
-            .withEntity("entity")
-            .withPeriod(20000, 15, TimeUnit.MINUTES)
-            .withTriageValues(Collections.singletonMap("triage-key", 123))
-            .withDefinition(profile);
-    handler.emit(measurement, collector);
-
-    ArgumentCaptor<Values> arg = ArgumentCaptor.forClass(Values.class);
-    verify(collector, times(1)).emit(eq(handler.getStreamId()), arg.capture());
-    Values values = arg.getValue();
-    assertTrue(values.get(0) instanceof JSONObject);
-    JSONObject actual = (JSONObject) values.get(0);
-
-    // the triage expression is valid
-    assertEquals(measurement.getTriageValues().get("triage-key"), actual.get("triage-key"));
-  }
-
-  /**
-   * Values destined for Kafka can only be serialized into text, which limits the types of values
-   * that can result from a triage expression.  Only primitive types and Strings are allowed.
-   */
-  @Test
-  public void testStringIsValidType() throws Exception {
-    ProfileMeasurement measurement = new ProfileMeasurement()
-            .withProfileName("profile")
-            .withEntity("entity")
-            .withPeriod(20000, 15, TimeUnit.MINUTES)
-            .withTriageValues(Collections.singletonMap("triage-key", "value"))
-            .withDefinition(profile);
-    handler.emit(measurement, collector);
-
-    ArgumentCaptor<Values> arg = ArgumentCaptor.forClass(Values.class);
-    verify(collector, times(1)).emit(eq(handler.getStreamId()), arg.capture());
-    Values values = arg.getValue();
-    assertTrue(values.get(0) instanceof JSONObject);
-    JSONObject actual = (JSONObject) values.get(0);
-
-    // the triage expression is valid
-    assertEquals(measurement.getTriageValues().get("triage-key"), actual.get("triage-key"));
-  }
-
-  /**
-   * Creates a profile definition based on a string of JSON.
-   * @param json The string of JSON.
-   */
-  private ProfileConfig createDefinition(String json) throws IOException {
-    return JSONUtils.INSTANCE.load(json, ProfileConfig.class);
-  }
-}

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/KafkaEmitterTest.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/KafkaEmitterTest.java b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/KafkaEmitterTest.java
new file mode 100644
index 0000000..b02e377
--- /dev/null
+++ b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/KafkaEmitterTest.java
@@ -0,0 +1,208 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.metron.profiler.bolt;
+
+import com.google.common.collect.ImmutableMap;
+import org.adrianwalker.multilinestring.Multiline;
+import org.apache.metron.common.configuration.profiler.ProfileConfig;
+import org.apache.metron.common.utils.JSONUtils;
+import org.apache.metron.profiler.ProfileMeasurement;
+import org.apache.metron.statistics.OnlineStatisticsProvider;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.tuple.Values;
+import org.json.simple.JSONObject;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mockito;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+/**
+ * Tests the KafkaDestinationHandler.
+ */
+public class KafkaEmitterTest {
+
+  /**
+   * {
+   *   "profile": "profile-one-destination",
+   *   "foreach": "ip_src_addr",
+   *   "init":   { "x": "0" },
+   *   "update": { "x": "x + 1" },
+   *   "result": "x"
+   * }
+   */
+  @Multiline
+  private String profileDefinition;
+
+  private KafkaEmitter handler;
+  private ProfileConfig profile;
+  private OutputCollector collector;
+
+  @Before
+  public void setup() throws Exception {
+    handler = new KafkaEmitter();
+    profile = createDefinition(profileDefinition);
+    collector = Mockito.mock(OutputCollector.class);
+  }
+
+  /**
+   * The handler must serialize the ProfileMeasurement into a JSONObject.
+   */
+  @Test
+  public void testSerialization() throws Exception {
+
+    ProfileMeasurement measurement = new ProfileMeasurement()
+            .withProfileName("profile")
+            .withEntity("entity")
+            .withPeriod(20000, 15, TimeUnit.MINUTES)
+            .withTriageValues(Collections.singletonMap("triage-key", "triage-value"))
+            .withDefinition(profile);
+    handler.emit(measurement, collector);
+
+    ArgumentCaptor<Values> arg = ArgumentCaptor.forClass(Values.class);
+    verify(collector, times(1)).emit(eq(handler.getStreamId()), arg.capture());
+
+    // expect a JSONObject
+    Values values = arg.getValue();
+    assertTrue(values.get(0) instanceof JSONObject);
+
+    // validate the json
+    JSONObject actual = (JSONObject) values.get(0);
+    assertEquals(measurement.getDefinition().getProfile(), actual.get("profile"));
+    assertEquals(measurement.getEntity(), actual.get("entity"));
+    assertEquals(measurement.getPeriod().getPeriod(), actual.get("period"));
+    assertEquals(measurement.getPeriod().getStartTimeMillis(), actual.get("period.start"));
+    assertEquals(measurement.getPeriod().getEndTimeMillis(), actual.get("period.end"));
+    assertEquals(measurement.getTriageValues().get("triage-key"), actual.get("triage-key"));
+    assertNotNull(actual.get("timestamp"));
+    assertEquals("profiler", actual.get("source.type"));
+  }
+
+  /**
+   * Values destined for Kafka can only be serialized into text, which limits the types of values
+   * that can result from a triage expression.  Only primitive types and Strings are allowed.
+   */
+  @Test
+  public void testInvalidType() throws Exception {
+
+    // create one invalid expression and one valid expression
+    Map<String, Object> triageValues = ImmutableMap.of(
+            "invalid", new OnlineStatisticsProvider(),
+            "valid", 4);
+
+    ProfileMeasurement measurement = new ProfileMeasurement()
+            .withProfileName("profile")
+            .withEntity("entity")
+            .withPeriod(20000, 15, TimeUnit.MINUTES)
+            .withTriageValues(triageValues)
+            .withDefinition(profile);
+    handler.emit(measurement, collector);
+
+    ArgumentCaptor<Values> arg = ArgumentCaptor.forClass(Values.class);
+    verify(collector, times(1)).emit(eq(handler.getStreamId()), arg.capture());
+    Values values = arg.getValue();
+    assertTrue(values.get(0) instanceof JSONObject);
+
+    // only the triage expression value itself should have been skipped, all others should be there
+    JSONObject actual = (JSONObject) values.get(0);
+    assertEquals(measurement.getDefinition().getProfile(), actual.get("profile"));
+    assertEquals(measurement.getEntity(), actual.get("entity"));
+    assertEquals(measurement.getPeriod().getPeriod(), actual.get("period"));
+    assertEquals(measurement.getPeriod().getStartTimeMillis(), actual.get("period.start"));
+    assertEquals(measurement.getPeriod().getEndTimeMillis(), actual.get("period.end"));
+    assertNotNull(actual.get("timestamp"));
+    assertEquals("profiler", actual.get("source.type"));
+
+    // the invalid expression should be skipped due to invalid type
+    assertFalse(actual.containsKey("invalid"));
+
+    // but the valid expression should still be there
+    assertEquals(triageValues.get("valid"), actual.get("valid"));
+  }
+
+  /**
+   * Values destined for Kafka can only be serialized into text, which limits the types of values
+   * that can result from a triage expression.  Only primitive types and Strings are allowed.
+   */
+  @Test
+  public void testIntegerIsValidType() throws Exception {
+    ProfileMeasurement measurement = new ProfileMeasurement()
+            .withProfileName("profile")
+            .withEntity("entity")
+            .withPeriod(20000, 15, TimeUnit.MINUTES)
+            .withTriageValues(Collections.singletonMap("triage-key", 123))
+            .withDefinition(profile);
+    handler.emit(measurement, collector);
+
+    ArgumentCaptor<Values> arg = ArgumentCaptor.forClass(Values.class);
+    verify(collector, times(1)).emit(eq(handler.getStreamId()), arg.capture());
+    Values values = arg.getValue();
+    assertTrue(values.get(0) instanceof JSONObject);
+    JSONObject actual = (JSONObject) values.get(0);
+
+    // the triage expression is valid
+    assertEquals(measurement.getTriageValues().get("triage-key"), actual.get("triage-key"));
+  }
+
+  /**
+   * Values destined for Kafka can only be serialized into text, which limits the types of values
+   * that can result from a triage expression.  Only primitive types and Strings are allowed.
+   */
+  @Test
+  public void testStringIsValidType() throws Exception {
+    ProfileMeasurement measurement = new ProfileMeasurement()
+            .withProfileName("profile")
+            .withEntity("entity")
+            .withPeriod(20000, 15, TimeUnit.MINUTES)
+            .withTriageValues(Collections.singletonMap("triage-key", "value"))
+            .withDefinition(profile);
+    handler.emit(measurement, collector);
+
+    ArgumentCaptor<Values> arg = ArgumentCaptor.forClass(Values.class);
+    verify(collector, times(1)).emit(eq(handler.getStreamId()), arg.capture());
+    Values values = arg.getValue();
+    assertTrue(values.get(0) instanceof JSONObject);
+    JSONObject actual = (JSONObject) values.get(0);
+
+    // the triage expression is valid
+    assertEquals(measurement.getTriageValues().get("triage-key"), actual.get("triage-key"));
+  }
+
+  /**
+   * Creates a profile definition based on a string of JSON.
+   * @param json The string of JSON.
+   */
+  private ProfileConfig createDefinition(String json) throws IOException {
+    return JSONUtils.INSTANCE.load(json, ProfileConfig.class);
+  }
+}

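The repeated javadoc above states the constraint these tests exercise: triage values bound for Kafka must serialize to text, so only primitive types and Strings survive while anything else (like OnlineStatisticsProvider) is skipped. A sketch of the kind of type filter that behavior implies (isValidType is a hypothetical helper, not the actual KafkaEmitter API):

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class TriageValueFilterExample {

  // hypothetical: keep only values that serialize cleanly to JSON text
  static boolean isValidType(Object value) {
    return value instanceof Number
        || value instanceof String
        || value instanceof Boolean;
  }

  public static void main(String[] args) {
    Map<String, Object> triageValues = new LinkedHashMap<>();
    triageValues.put("valid", 4);
    triageValues.put("also-valid", "text");
    triageValues.put("invalid", new Object()); // stands in for OnlineStatisticsProvider

    Map<String, Object> emitted = new LinkedHashMap<>();
    triageValues.forEach((k, v) -> {
      if (isValidType(v)) {
        emitted.put(k, v);  // kept, like "valid" in the test
      }                     // otherwise skipped, like "invalid"
    });
    System.out.println(emitted);  // {valid=4, also-valid=text}
  }
}
```
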
http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/ProfileBuilderBoltTest.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/ProfileBuilderBoltTest.java b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/ProfileBuilderBoltTest.java
index 21d61ab..78e20e0 100644
--- a/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/ProfileBuilderBoltTest.java
+++ b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/ProfileBuilderBoltTest.java
@@ -20,35 +20,37 @@
 
 package org.apache.metron.profiler.bolt;
 
-import org.adrianwalker.multilinestring.Multiline;
-import org.apache.commons.lang3.StringUtils;
 import org.apache.metron.common.configuration.profiler.ProfileConfig;
-import org.apache.metron.common.utils.JSONUtils;
+import org.apache.metron.common.configuration.profiler.ProfilerConfigurations;
+import org.apache.metron.profiler.MessageDistributor;
 import org.apache.metron.profiler.MessageRoute;
-import org.apache.metron.profiler.ProfileBuilder;
 import org.apache.metron.profiler.ProfileMeasurement;
-import org.apache.metron.stellar.dsl.Context;
+import org.apache.metron.profiler.integration.MessageBuilder;
 import org.apache.metron.test.bolt.BaseBoltTest;
-import org.apache.storm.Constants;
+import org.apache.storm.task.OutputCollector;
+import org.apache.storm.topology.OutputFieldsDeclarer;
+import org.apache.storm.topology.base.BaseWindowedBolt;
+import org.apache.storm.tuple.Fields;
 import org.apache.storm.tuple.Tuple;
 import org.apache.storm.tuple.Values;
+import org.apache.storm.windowing.TupleWindow;
 import org.json.simple.JSONObject;
-import org.json.simple.parser.JSONParser;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
 
-import static org.apache.metron.stellar.common.utils.ConversionUtils.convert;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.atLeastOnce;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -59,284 +61,348 @@ import static org.mockito.Mockito.when;
  */
 public class ProfileBuilderBoltTest extends BaseBoltTest {
 
-  /**
-   * {
-   *   "ip_src_addr": "10.0.0.1",
-   *   "value": "22"
-   * }
-   */
-  @Multiline
-  private String inputOne;
-  private JSONObject messageOne;
+  private JSONObject message1;
+  private JSONObject message2;
+  private ProfileConfig profile1;
+  private ProfileConfig profile2;
+  private ProfileMeasurementEmitter emitter;
+  private ManualFlushSignal flushSignal;
 
-  /**
-   * {
-   *   "ip_src_addr": "10.0.0.2",
-   *   "value": "22"
-   * }
-   */
-  @Multiline
-  private String inputTwo;
-  private JSONObject messageTwo;
+  @Before
+  public void setup() throws Exception {
+
+    message1 = new MessageBuilder()
+            .withField("ip_src_addr", "10.0.0.1")
+            .withField("value", "22")
+            .build();
+
+    message2 = new MessageBuilder()
+            .withField("ip_src_addr", "10.0.0.2")
+            .withField("value", "22")
+            .build();
+
+    profile1 = new ProfileConfig()
+            .withProfile("profile1")
+            .withForeach("ip_src_addr")
+            .withInit("x", "0")
+            .withUpdate("x", "x + 1")
+            .withResult("x");
+
+    profile2 = new ProfileConfig()
+            .withProfile("profile2")
+            .withForeach("ip_src_addr")
+            .withInit(Collections.singletonMap("x", "0"))
+            .withUpdate(Collections.singletonMap("x", "x + 1"))
+            .withResult("x");
+
+    flushSignal = new ManualFlushSignal();
+    flushSignal.setFlushNow(false);
+  }
 
   /**
-   * {
-   *   "profile": "profileOne",
-   *   "foreach": "ip_src_addr",
-   *   "init":   { "x": "0" },
-   *   "update": { "x": "x + 1" },
-   *   "result": "x"
-   * }
+   * The bolt should extract a message and timestamp from a tuple and
+   * pass that to a {@code MessageDistributor}.
    */
-  @Multiline
-  private String profileOne;
+  @Test
+  public void testExtractMessage() throws Exception {
 
+    ProfileBuilderBolt bolt = createBolt();
 
-  /**
-   * {
-   *   "profile": "profileTwo",
-   *   "foreach": "ip_src_addr",
-   *   "init":   { "x": "0" },
-   *   "update": { "x": "x + 1" },
-   *   "result": "x"
-   * }
-   */
-  @Multiline
-  private String profileTwo;
+    // create a mock
+    MessageDistributor distributor = mock(MessageDistributor.class);
+    bolt.withMessageDistributor(distributor);
 
-  public static Tuple mockTickTuple() {
-    Tuple tuple = mock(Tuple.class);
-    when(tuple.getSourceComponent()).thenReturn(Constants.SYSTEM_COMPONENT_ID);
-    when(tuple.getSourceStreamId()).thenReturn(Constants.SYSTEM_TICK_STREAM_ID);
-    return tuple;
-  }
+    // create a tuple
+    final long timestamp1 = 100000000L;
+    Tuple tuple1 = createTuple("entity1", message1, profile1, timestamp1);
 
-  @Before
-  public void setup() throws Exception {
-    JSONParser parser = new JSONParser();
-    messageOne = (JSONObject) parser.parse(inputOne);
-    messageTwo = (JSONObject) parser.parse(inputTwo);
+    // execute the bolt
+    TupleWindow tupleWindow = createWindow(tuple1);
+    bolt.execute(tupleWindow);
+
+    // the message should have been extracted from the tuple and passed to the MessageDistributor
+    verify(distributor).distribute(eq(message1), eq(timestamp1), any(MessageRoute.class), any());
   }
 
+
   /**
-   * Creates a profile definition based on a string of JSON.
-   * @param json The string of JSON.
+   * If the {@code FlushSignal} tells the bolt to flush, it should flush the {@code MessageDistributor}
+   * and emit the {@code ProfileMeasurement} values.
    */
-  private ProfileConfig createDefinition(String json) throws IOException {
-    return JSONUtils.INSTANCE.load(json, ProfileConfig.class);
+  @Test
+  public void testEmitWhenFlush() throws Exception {
+
+    ProfileBuilderBolt bolt = createBolt();
+
+    // create a profile measurement
+    ProfileMeasurement m = new ProfileMeasurement()
+            .withEntity("entity1")
+            .withProfileName("profile1")
+            .withPeriod(1000, 500, TimeUnit.MILLISECONDS)
+            .withProfileValue(22);
+
+    // create a mock that returns the profile measurement above
+    MessageDistributor distributor = mock(MessageDistributor.class);
+    when(distributor.flush()).thenReturn(Collections.singletonList(m));
+    bolt.withMessageDistributor(distributor);
+
+    // signal the bolt to flush
+    flushSignal.setFlushNow(true);
+
+    // execute the bolt
+    Tuple tuple1 = createTuple("entity1", message1, profile1, 1000L);
+    TupleWindow tupleWindow = createWindow(tuple1);
+    bolt.execute(tupleWindow);
+
+    // a profile measurement should be emitted by the bolt
+    List<ProfileMeasurement> measurements = getProfileMeasurements(outputCollector, 1);
+    assertEquals(1, measurements.size());
+    assertEquals(m, measurements.get(0));
   }
 
   /**
-   * Create a tuple that will contain the message, the entity name, and profile definition.
-   * @param entity The entity name
-   * @param message The telemetry message.
-   * @param profile The profile definition.
+   * If the {@code FlushSignal} tells the bolt NOT to flush, nothing should be emitted.
    */
-  private Tuple createTuple(String entity, JSONObject message, ProfileConfig profile) {
-    Tuple tuple = mock(Tuple.class);
-    when(tuple.getValueByField(eq("message"))).thenReturn(message);
-    when(tuple.getValueByField(eq("entity"))).thenReturn(entity);
-    when(tuple.getValueByField(eq("profile"))).thenReturn(profile);
-    return tuple;
+  @Test
+  public void testDoNotEmitWhenNoFlush() throws Exception {
+
+    ProfileBuilderBolt bolt = createBolt();
+
+    // create a profile measurement
+    ProfileMeasurement m = new ProfileMeasurement()
+            .withEntity("entity1")
+            .withProfileName("profile1")
+            .withPeriod(1000, 500, TimeUnit.MILLISECONDS)
+            .withProfileValue(22);
+
+    // create a mock that returns the profile measurement above
+    MessageDistributor distributor = mock(MessageDistributor.class);
+    when(distributor.flush()).thenReturn(Collections.singletonList(m));
+    bolt.withMessageDistributor(distributor);
+
+    // no flush signal
+    flushSignal.setFlushNow(false);
+
+    // execute the bolt
+    Tuple tuple1 = createTuple("entity1", message1, profile1, 1000L);
+    TupleWindow tupleWindow = createWindow(tuple1);
+    bolt.execute(tupleWindow);
+
+    // nothing should have been emitted
+    getProfileMeasurements(outputCollector, 0);
   }
 
   /**
-   * Create a ProfileBuilderBolt to test
+   * A {@link ProfileMeasurement} is built for each profile/entity pair.  The measurement should be emitted to each
+   * destination defined by the profile. By default, a profile uses both Kafka and HBase as destinations.
    */
-  private ProfileBuilderBolt createBolt() throws IOException {
+  @Test
+  public void testEmitters() throws Exception {
+
+    // defines the zk configurations accessible from the bolt
+    ProfilerConfigurations configurations = new ProfilerConfigurations();
+    configurations.updateGlobalConfig(Collections.emptyMap());
+
+    // create the bolt with 3 destinations
+    ProfileBuilderBolt bolt = (ProfileBuilderBolt) new ProfileBuilderBolt()
+            .withProfileTimeToLive(30, TimeUnit.MINUTES)
+            .withPeriodDuration(10, TimeUnit.MINUTES)
+            .withMaxNumberOfRoutes(Long.MAX_VALUE)
+            .withZookeeperClient(client)
+            .withZookeeperCache(cache)
+            .withEmitter(new TestEmitter("destination1"))
+            .withEmitter(new TestEmitter("destination2"))
+            .withEmitter(new TestEmitter("destination3"))
+            .withProfilerConfigurations(configurations)
+            .withTumblingWindow(new BaseWindowedBolt.Duration(10, TimeUnit.MINUTES));
+    bolt.prepare(new HashMap<>(), topologyContext, outputCollector);
 
-    ProfileBuilderBolt bolt = new ProfileBuilderBolt("zookeeperURL");
-    bolt.setCuratorFramework(client);
-    bolt.setZKCache(cache);
-    bolt.withPeriodDuration(10, TimeUnit.MINUTES);
-    bolt.withProfileTimeToLive(30, TimeUnit.MINUTES);
+    // signal the bolt to flush
+    bolt.withFlushSignal(flushSignal);
+    flushSignal.setFlushNow(true);
 
-    // define the valid destinations for the profiler
-    bolt.withDestinationHandler(new HBaseDestinationHandler());
-    bolt.withDestinationHandler(new KafkaDestinationHandler());
+    // execute the bolt
+    Tuple tuple1 = createTuple("entity", message1, profile1, System.currentTimeMillis());
+    TupleWindow window = createWindow(tuple1);
+    bolt.execute(window);
 
-    bolt.prepare(new HashMap<>(), topologyContext, outputCollector);
-    return bolt;
+    // validate measurements emitted to each
+    verify(outputCollector, times(1)).emit(eq("destination1"), any());
+    verify(outputCollector, times(1)).emit(eq("destination2"), any());
+    verify(outputCollector, times(1)).emit(eq("destination3"), any());
   }
 
-  /**
-   * The bolt should create a ProfileBuilder to manage a profile.
-   */
   @Test
-  public void testCreateProfileBuilder() throws Exception {
+  public void testFlushExpiredWithTick() throws Exception {
 
     ProfileBuilderBolt bolt = createBolt();
-    ProfileConfig definition = createDefinition(profileOne);
-    String entity = (String) messageOne.get("ip_src_addr");
-    Tuple tupleOne = createTuple(entity, messageOne, definition);
 
-    // execute - send two tuples with different entities
-    bolt.execute(tupleOne);
+    // create a mock
+    MessageDistributor distributor = mock(MessageDistributor.class);
+    bolt.withMessageDistributor(distributor);
+
+    // tell the bolt to flush on the first window
+    flushSignal.setFlushNow(true);
 
-    // validate - 1 messages applied
-    MessageRoute route = new MessageRoute(definition, entity);
-    ProfileBuilder builderOne = bolt.getMessageDistributor().getBuilder(route, Context.EMPTY_CONTEXT());
-    assertEquals(1, (int) convert(builderOne.valueOf("x"), Integer.class));
+    // execute the bolt; include a tick tuple in the window
+    Tuple tuple1 = createTuple("entity", message1, profile1, 100000000L);
+    TupleWindow tupleWindow = createWindow(tuple1, mockTickTuple());
+    bolt.execute(tupleWindow);
+
+    // ensure the expired profiles were flushed when the tick tuple was received
+    verify(distributor).flushExpired();
   }
 
-  /**
-   * This test creates two different messages, with different entities that are applied to
-   * the same profile.  The bolt should create separate ProfileBuilder objects to handle each
-   * profile/entity pair.
-   */
   @Test
-  public void testCreateProfileBuilderForEachEntity() throws Exception {
+  public void testFlushExpiredWithNoTick() throws Exception {
 
-    // setup
     ProfileBuilderBolt bolt = createBolt();
-    ProfileConfig definition = createDefinition(profileOne);
-
-    // apply a message to the profile
-    String entityOne = (String) messageOne.get("ip_src_addr");
-    Tuple tupleOne = createTuple(entityOne, messageOne, definition);
-    bolt.execute(tupleOne);
-    bolt.execute(tupleOne);
-
-    // apply a different message (with different entity) to the same profile
-    String entityTwo = (String) messageTwo.get("ip_src_addr");
-    Tuple tupleTwo = createTuple(entityTwo, messageTwo, definition);
-    bolt.execute(tupleTwo);
-
-    // validate - 2 messages applied
-    MessageRoute routeOne = new MessageRoute(definition, entityOne);
-    ProfileBuilder builderOne = bolt.getMessageDistributor().getBuilder(routeOne, Context.EMPTY_CONTEXT());
-    assertTrue(builderOne.isInitialized());
-    assertEquals(2, (int) convert(builderOne.valueOf("x"), Integer.class));
-
-    // validate - 1 message applied
-    MessageRoute routeTwo = new MessageRoute(definition, entityTwo);
-    ProfileBuilder builderTwo = bolt.getMessageDistributor().getBuilder(routeTwo, Context.EMPTY_CONTEXT());
-    assertTrue(builderTwo.isInitialized());
-    assertEquals(1, (int) convert(builderTwo.valueOf("x"), Integer.class));
-
-    assertNotSame(builderOne, builderTwo);
+
+    // create a mock
+    MessageDistributor distributor = mock(MessageDistributor.class);
+    bolt.withMessageDistributor(distributor);
+
+    // tell the bolt to flush on the first window
+    flushSignal.setFlushNow(true);
+
+    // execute the bolt; NO tick tuple
+    Tuple tuple1 = createTuple("entity", message1, profile1, 100000000L);
+    TupleWindow tupleWindow = createWindow(tuple1);
+    bolt.execute(tupleWindow);
+
+    // there was no tick tuple; the expired profiles should NOT have been flushed
+    verify(distributor, times(0)).flushExpired();
   }
 
   /**
-   * The bolt should create separate ProfileBuilder objects to handle each
-   * profile/entity pair.
+   * Creates a mock tick tuple to use for testing.
+   * @return A mock tick tuple.
    */
-  @Test
-  public void testCreateProfileBuilderForEachProfile() throws Exception {
+  private Tuple mockTickTuple() {
 
-    // setup - apply one message to different profile definitions
-    ProfileBuilderBolt bolt = createBolt();
-    String entity = (String) messageOne.get("ip_src_addr");
-
-    // apply a message to the first profile
-    ProfileConfig definitionOne = createDefinition(profileOne);
-    Tuple tupleOne = createTuple(entity, messageOne, definitionOne);
-    bolt.execute(tupleOne);
-
-    // apply the same message to the second profile
-    ProfileConfig definitionTwo = createDefinition(profileTwo);
-    Tuple tupleTwo = createTuple(entity, messageOne, definitionTwo);
-    bolt.execute(tupleTwo);
-
-    // validate - 1 message applied
-    MessageRoute routeOne = new MessageRoute(definitionOne, entity);
-    ProfileBuilder builderOne = bolt.getMessageDistributor().getBuilder(routeOne, Context.EMPTY_CONTEXT());
-    assertTrue(builderOne.isInitialized());
-    assertEquals(1, (int) convert(builderOne.valueOf("x"), Integer.class));
-
-    // validate - 1 message applied
-    MessageRoute routeTwo = new MessageRoute(definitionTwo, entity);
-    ProfileBuilder builderTwo = bolt.getMessageDistributor().getBuilder(routeTwo, Context.EMPTY_CONTEXT());
-    assertTrue(builderTwo.isInitialized());
-    assertEquals(1, (int) convert(builderTwo.valueOf("x"), Integer.class));
-
-    assertNotSame(builderOne, builderTwo);
+    Tuple tuple = mock(Tuple.class);
+    when(tuple.getSourceComponent()).thenReturn("__system");
+    when(tuple.getSourceStreamId()).thenReturn("__tick");
+
+    return tuple;
   }
 
   /**
-   * A ProfileMeasurement is build for each profile/entity pair.  A measurement for each profile/entity
-   * pair should be emitted.
+   * Retrieves the ProfileMeasurement(s) (if any) that have been emitted.
+   *
+   * @param collector The Storm output collector.
+   * @param expected The number of measurements expected.
+   * @return A list of ProfileMeasurement(s).
    */
-  @Test
-  public void testEmitMeasurements() throws Exception {
-
-    // setup
-    ProfileBuilderBolt bolt = createBolt();
-    final String entity = (String) messageOne.get("ip_src_addr");
+  private List<ProfileMeasurement> getProfileMeasurements(OutputCollector collector, int expected) {
 
-    // apply the message to the first profile
-    ProfileConfig definitionOne = createDefinition(profileOne);
-    Tuple tupleOne = createTuple(entity, messageOne, definitionOne);
-    bolt.execute(tupleOne);
+    // the 'streamId' is defined by the ProfileMeasurementEmitter being used by the bolt
+    final String streamId = emitter.getStreamId();
 
-    // apply the same message to the second profile
-    ProfileConfig definitionTwo = createDefinition(profileTwo);
-    Tuple tupleTwo = createTuple(entity, messageOne, definitionTwo);
-    bolt.execute(tupleTwo);
+    // capture the emitted tuple(s)
+    ArgumentCaptor<Values> argCaptor = ArgumentCaptor.forClass(Values.class);
+    verify(collector, times(expected))
+            .emit(eq(streamId), argCaptor.capture());
 
-    // execute - the tick tuple triggers a flush of the profile
-    bolt.execute(mockTickTuple());
+    // return the profile measurements that were emitted
+    return argCaptor.getAllValues()
+            .stream()
+            .map(val -> (ProfileMeasurement) val.get(0))
+            .collect(Collectors.toList());
+  }
 
-    // capture the ProfileMeasurement that should be emitted
-    ArgumentCaptor<Values> arg = ArgumentCaptor.forClass(Values.class);
+  /**
+   * Creates a tuple containing the message, the entity name, the profile definition, and the timestamp.
+   * @param entity The entity name.
+   * @param message The telemetry message.
+   * @param profile The profile definition.
+   * @param timestamp The timestamp of the telemetry message.
+   */
+  private Tuple createTuple(String entity, JSONObject message, ProfileConfig profile, long timestamp) {
 
-    // validate emitted measurements for hbase
-    verify(outputCollector, atLeastOnce()).emit(eq("hbase"), arg.capture());
-    for (Values value : arg.getAllValues()) {
+    Tuple tuple = mock(Tuple.class);
+    when(tuple.getValueByField(eq(ProfileSplitterBolt.MESSAGE_TUPLE_FIELD))).thenReturn(message);
+    when(tuple.getValueByField(eq(ProfileSplitterBolt.TIMESTAMP_TUPLE_FIELD))).thenReturn(timestamp);
+    when(tuple.getValueByField(eq(ProfileSplitterBolt.ENTITY_TUPLE_FIELD))).thenReturn(entity);
+    when(tuple.getValueByField(eq(ProfileSplitterBolt.PROFILE_TUPLE_FIELD))).thenReturn(profile);
 
-      ProfileMeasurement measurement = (ProfileMeasurement) value.get(0);
-      ProfileConfig definition = measurement.getDefinition();
+    return tuple;
+  }
 
-      if (StringUtils.equals(definitionTwo.getProfile(), definition.getProfile())) {
+  /**
+   * Create a ProfileBuilderBolt to test.
+   * @return A {@link ProfileBuilderBolt} to test.
+   */
+  private ProfileBuilderBolt createBolt() throws IOException {
 
-        // validate measurement emitted for profile two
-        assertEquals(definitionTwo, definition);
-        assertEquals(entity, measurement.getEntity());
-        assertEquals(definitionTwo.getProfile(), measurement.getProfileName());
-        assertEquals(1, (int) convert(measurement.getProfileValue(), Integer.class));
+    return createBolt(30, TimeUnit.SECONDS);
+  }
 
-      } else if (StringUtils.equals(definitionOne.getProfile(), definition.getProfile())) {
+  /**
+   * Create a ProfileBuilderBolt to test.
+   *
+   * @param windowDuration The event window duration.
+   * @param windowDurationUnits The units of the event window duration.
+   * @return A {@link ProfileBuilderBolt} to test.
+   */
+  private ProfileBuilderBolt createBolt(int windowDuration, TimeUnit windowDurationUnits) throws IOException {
+
+    // defines the zk configurations accessible from the bolt
+    ProfilerConfigurations configurations = new ProfilerConfigurations();
+    configurations.updateGlobalConfig(Collections.emptyMap());
+
+    emitter = new HBaseEmitter();
+    ProfileBuilderBolt bolt = (ProfileBuilderBolt) new ProfileBuilderBolt()
+            .withProfileTimeToLive(30, TimeUnit.MINUTES)
+            .withMaxNumberOfRoutes(Long.MAX_VALUE)
+            .withZookeeperClient(client)
+            .withZookeeperCache(cache)
+            .withEmitter(emitter)
+            .withProfilerConfigurations(configurations)
+            .withPeriodDuration(1, TimeUnit.MINUTES)
+            .withTumblingWindow(new BaseWindowedBolt.Duration(windowDuration, windowDurationUnits));
+    bolt.prepare(new HashMap<>(), topologyContext, outputCollector);
 
-        // validate measurement emitted for profile one
-        assertEquals(definitionOne, definition);
-        assertEquals(entity, measurement.getEntity());
-        assertEquals(definitionOne.getProfile(), measurement.getProfileName());
-        assertEquals(1, (int) convert(measurement.getProfileValue(), Integer.class));
+    // set the flush signal AFTER calling 'prepare'
+    bolt.withFlushSignal(flushSignal);
 
-      } else {
-        fail();
-      }
-    }
+    return bolt;
   }
 
   /**
-   * A ProfileMeasurement is build for each profile/entity pair.  The measurement should be emitted to each
-   * destination defined by the profile. By default, a profile uses both Kafka and HBase as destinations.
+   * Creates a mock TupleWindow containing multiple tuples.
+   * @param tuples The tuples to add to the window.
    */
-  @Test
-  public void testDestinationHandlers() throws Exception {
+  private TupleWindow createWindow(Tuple... tuples) {
 
-    // setup
-    ProfileBuilderBolt bolt = createBolt();
-    ProfileConfig definitionOne = createDefinition(profileOne);
+    TupleWindow window = mock(TupleWindow.class);
+    when(window.get()).thenReturn(Arrays.asList(tuples));
+    return window;
+  }
 
-    // apply the message to the first profile
-    final String entity = (String) messageOne.get("ip_src_addr");
-    Tuple tupleOne = createTuple(entity, messageOne, definitionOne);
-    bolt.execute(tupleOne);
+  /**
+   * A {@link ProfileMeasurementEmitter} implementation for testing purposes only.
+   */
+  private class TestEmitter implements ProfileMeasurementEmitter {
 
-    // trigger a flush of the profile
-    bolt.execute(mockTickTuple());
+    private String streamId;
 
-    // capture the values that should be emitted
-    ArgumentCaptor<Values> arg = ArgumentCaptor.forClass(Values.class);
+    public TestEmitter(String streamId) {
+      this.streamId = streamId;
+    }
 
-    // validate measurements emitted to HBase
-    verify(outputCollector, times(1)).emit(eq("hbase"), arg.capture());
-    assertTrue(arg.getValue().get(0) instanceof ProfileMeasurement);
+    @Override
+    public String getStreamId() {
+      return streamId;
+    }
 
-    // validate measurements emitted to Kafka
-    verify(outputCollector, times(1)).emit(eq("kafka"), arg.capture());
-    assertTrue(arg.getValue().get(0) instanceof JSONObject);
+    @Override
+    public void declareOutputFields(OutputFieldsDeclarer declarer) {
+      declarer.declareStream(getStreamId(), new Fields("measurement"));
+    }
+
+    @Override
+    public void emit(ProfileMeasurement measurement, OutputCollector collector) {
+      collector.emit(getStreamId(), new Values(measurement));
+    }
   }
 }
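
Aside: the mockTickTuple() helper above mirrors Storm's convention that tick
tuples originate from the "__system" component on the "__tick" stream. A
minimal sketch of the detection logic a bolt could apply, under that
assumption (illustrative only, not the actual ProfileBuilderBolt source):

    import org.apache.storm.tuple.Tuple;

    public class TickTuples {

      /**
       * Returns true if the tuple is a Storm tick tuple; this matches
       * exactly what mockTickTuple() stubs out in the test above.
       */
      public static boolean isTickTuple(Tuple tuple) {
        return "__system".equals(tuple.getSourceComponent())
            && "__tick".equals(tuple.getSourceStreamId());
      }
    }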

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/ProfileHBaseMapperTest.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/ProfileHBaseMapperTest.java b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/ProfileHBaseMapperTest.java
index 17d6827..04c774c 100644
--- a/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/ProfileHBaseMapperTest.java
+++ b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/ProfileHBaseMapperTest.java
@@ -20,11 +20,11 @@
 
 package org.apache.metron.profiler.bolt;
 
-import org.apache.metron.common.configuration.profiler.ProfileResult;
-import org.apache.storm.tuple.Tuple;
 import org.apache.metron.common.configuration.profiler.ProfileConfig;
+import org.apache.metron.common.configuration.profiler.ProfileResult;
 import org.apache.metron.profiler.ProfileMeasurement;
 import org.apache.metron.profiler.hbase.RowKeyBuilder;
+import org.apache.storm.tuple.Tuple;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -32,10 +32,8 @@ import org.junit.Test;
 import java.util.Optional;
 import java.util.concurrent.TimeUnit;
 
-import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
 /**

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/ProfileSplitterBoltTest.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/ProfileSplitterBoltTest.java b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/ProfileSplitterBoltTest.java
index beab8d5..bf81923 100644
--- a/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/ProfileSplitterBoltTest.java
+++ b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/bolt/ProfileSplitterBoltTest.java
@@ -21,7 +21,10 @@
 package org.apache.metron.profiler.bolt;
 
 import org.adrianwalker.multilinestring.Multiline;
-import org.apache.metron.stellar.common.DefaultStellarStatefulExecutor;
+import org.apache.metron.common.configuration.profiler.ProfileConfig;
+import org.apache.metron.common.configuration.profiler.ProfilerConfig;
+import org.apache.metron.profiler.clock.FixedClockFactory;
+import org.apache.metron.common.utils.JSONUtils;
 import org.apache.metron.test.bolt.BaseBoltTest;
 import org.apache.storm.tuple.Tuple;
 import org.apache.storm.tuple.Values;
@@ -31,12 +34,15 @@ import org.json.simple.parser.ParseException;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.io.IOException;
+import java.io.ByteArrayInputStream;
+import java.io.InputStream;
 import java.util.HashMap;
 
 import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.refEq;
-import static org.mockito.Mockito.*;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 /**
  * Tests the ProfileSplitterBolt.
@@ -47,7 +53,9 @@ public class ProfileSplitterBoltTest extends BaseBoltTest {
    * {
    *   "ip_src_addr": "10.0.0.1",
    *   "ip_dst_addr": "10.0.0.20",
-   *   "protocol": "HTTP"
+   *   "protocol": "HTTP",
+   *   "timestamp.custom": 2222222222222,
+   *   "timestamp.string": "3333333333333"
    * }
    */
   @Multiline
@@ -68,7 +76,7 @@ public class ProfileSplitterBoltTest extends BaseBoltTest {
    * }
    */
   @Multiline
-  private String onlyIfTrue;
+  private String profileWithOnlyIfTrue;
 
   /**
    * {
@@ -85,7 +93,7 @@ public class ProfileSplitterBoltTest extends BaseBoltTest {
    * }
    */
   @Multiline
-  private String onlyIfFalse;
+  private String profileWithOnlyIfFalse;
 
   /**
    * {
@@ -101,7 +109,7 @@ public class ProfileSplitterBoltTest extends BaseBoltTest {
    * }
    */
   @Multiline
-  private String onlyIfMissing;
+  private String profileWithOnlyIfMissing;
 
   /**
    * {
@@ -118,9 +126,89 @@ public class ProfileSplitterBoltTest extends BaseBoltTest {
    * }
    */
   @Multiline
-  private String onlyIfInvalid;
+  private String profileWithOnlyIfInvalid;
+
+  /**
+   * {
+   *   "profiles": [
+   *      {
+   *        "profile": "test",
+   *        "foreach": "ip_src_addr",
+   *        "init": {},
+   *        "update": {},
+   *        "result": "2"
+   *      }
+   *   ],
+   *   "timestampField": "timestamp.custom"
+   * }
+   */
+  @Multiline
+  private String profileUsingCustomTimestampField;
+
+  /**
+   * {
+   *   "profiles": [
+   *      {
+   *        "profile": "test",
+   *        "foreach": "ip_src_addr",
+   *        "init": {},
+   *        "update": {},
+   *        "result": "2"
+   *      }
+   *   ],
+   *   "timestampField": "timestamp.missing"
+   * }
+   */
+  @Multiline
+  private String profileUsingMissingTimestampField;
+
+  /**
+   * {
+   *   "profiles": [
+   *      {
+   *        "profile": "test",
+   *        "foreach": "ip_src_addr",
+   *        "init": {},
+   *        "update": {},
+   *        "result": "2"
+   *      }
+   *   ],
+   *   "timestampField": "timestamp.string"
+   * }
+   */
+  @Multiline
+  private String profileUsingStringTimestampField;
+
+  /**
+   * {
+   *   "profiles": [
+   *   ]
+   * }
+   */
+  @Multiline
+  private String noProfilesDefined;
+
+  /**
+   * {
+   *   "profiles": [
+   *      {
+   *        "profile": "profile1",
+   *        "foreach": "'global'",
+   *        "result": "1"
+   *      },
+   *      {
+   *        "profile": "profile2",
+   *        "foreach": "'global'",
+   *        "result": "2"
+   *      }
+   *   ]
+   * }
+   */
+  @Multiline
+  private String twoProfilesDefined;
 
   private JSONObject message;
+  private long timestamp = 3333333;
 
   @Before
   public void setup() throws ParseException {
@@ -134,17 +222,83 @@ public class ProfileSplitterBoltTest extends BaseBoltTest {
   }
 
   /**
-   * Create a ProfileSplitterBolt to test
+   * Ensure that a tuple with the correct fields is emitted to downstream bolts
+   * when a profile is defined.
    */
-  private ProfileSplitterBolt createBolt(String profilerConfig) throws IOException {
+  @Test
+  public void testEmitTupleWithOneProfile() throws Exception {
 
-    ProfileSplitterBolt bolt = new ProfileSplitterBolt("zookeeperURL");
-    bolt.setCuratorFramework(client);
-    bolt.setZKCache(cache);
-    bolt.getConfigurations().updateProfilerConfig(profilerConfig.getBytes("UTF-8"));
-    bolt.prepare(new HashMap<>(), topologyContext, outputCollector);
+    // setup the bolt and execute a tuple
+    ProfilerConfig config = toProfilerConfig(profileWithOnlyIfTrue);
+    ProfileSplitterBolt bolt = createBolt(config);
+    bolt.execute(tuple);
 
-    return bolt;
+    // the expected tuple fields
+    String expectedEntity = "10.0.0.1";
+    ProfileConfig expectedConfig = config.getProfiles().get(0);
+    Values expected = new Values(message, timestamp, expectedEntity, expectedConfig);
+
+    // a tuple should be emitted for the downstream profile builder
+    verify(outputCollector, times(1))
+            .emit(eq(tuple), eq(expected));
+
+    // the original tuple should be ack'd
+    verify(outputCollector, times(1))
+            .ack(eq(tuple));
+  }
+
+  /**
+   * If there are two profiles that need the same message, then two tuples
+   * should be emitted, one for each profile.
+   */
+  @Test
+  public void testEmitTupleWithTwoProfiles() throws Exception {
+
+    // setup the bolt and execute a tuple
+    ProfilerConfig config = toProfilerConfig(twoProfilesDefined);
+    ProfileSplitterBolt bolt = createBolt(config);
+    bolt.execute(tuple);
+
+    // the expected tuple fields
+    final String expectedEntity = "global";
+    {
+      // a tuple should be emitted for the first profile
+      ProfileConfig profile1 = config.getProfiles().get(0);
+      Values expected = new Values(message, timestamp, expectedEntity, profile1);
+      verify(outputCollector, times(1))
+              .emit(eq(tuple), eq(expected));
+    }
+    {
+      // a tuple should be emitted for the second profile
+      ProfileConfig profile2 = config.getProfiles().get(1);
+      Values expected = new Values(message, timestamp, expectedEntity, profile2);
+      verify(outputCollector, times(1))
+              .emit(eq(tuple), eq(expected));
+    }
+
+    // the original tuple should be ack'd
+    verify(outputCollector, times(1))
+            .ack(eq(tuple));
+  }
+
+  /**
+   * No tuples should be emitted if no profiles are defined.
+   */
+  @Test
+  public void testNoProfilesDefined() throws Exception {
+
+    // setup the bolt and execute a tuple
+    ProfilerConfig config = toProfilerConfig(noProfilesDefined);
+    ProfileSplitterBolt bolt = createBolt(config);
+    bolt.execute(tuple);
+
+    // no tuple should be emitted
+    verify(outputCollector, times(0))
+            .emit(any(Tuple.class), any());
+
+    // the original tuple should be ack'd
+    verify(outputCollector, times(1))
+            .ack(eq(tuple));
   }
 
   /**
@@ -154,17 +308,17 @@ public class ProfileSplitterBoltTest extends BaseBoltTest {
   @Test
   public void testOnlyIfTrue() throws Exception {
 
-    // setup
-    ProfileSplitterBolt bolt = createBolt(onlyIfTrue);
-
-    // execute
+    ProfilerConfig config = toProfilerConfig(profileWithOnlyIfTrue);
+    ProfileSplitterBolt bolt = createBolt(config);
     bolt.execute(tuple);
 
     // a tuple should be emitted for the downstream profile builder
-    verify(outputCollector, times(1)).emit(refEq(tuple), any(Values.class));
+    verify(outputCollector, times(1))
+            .emit(eq(tuple), any(Values.class));
 
     // the original tuple should be ack'd
-    verify(outputCollector, times(1)).ack(tuple);
+    verify(outputCollector, times(1))
+            .ack(eq(tuple));
   }
 
   /**
@@ -174,17 +328,17 @@ public class ProfileSplitterBoltTest extends BaseBoltTest {
   @Test
   public void testOnlyIfMissing() throws Exception {
 
-    // setup
-    ProfileSplitterBolt bolt = createBolt(onlyIfMissing);
-
-    // execute
+    ProfilerConfig config = toProfilerConfig(profileWithOnlyIfMissing);
+    ProfileSplitterBolt bolt = createBolt(config);
     bolt.execute(tuple);
 
     // a tuple should be emitted for the downstream profile builder
-    verify(outputCollector, times(1)).emit(refEq(tuple), any(Values.class));
+    verify(outputCollector, times(1))
+            .emit(eq(tuple), any(Values.class));
 
     // the original tuple should be ack'd
-    verify(outputCollector, times(1)).ack(tuple);
+    verify(outputCollector, times(1))
+            .ack(eq(tuple));
   }
 
   /**
@@ -194,36 +348,45 @@ public class ProfileSplitterBoltTest extends BaseBoltTest {
   @Test
   public void testOnlyIfFalse() throws Exception {
 
-    // setup
-    ProfileSplitterBolt bolt = createBolt(onlyIfFalse);
-
-    // execute
+    ProfilerConfig config = toProfilerConfig(profileWithOnlyIfFalse);
+    ProfileSplitterBolt bolt = createBolt(config);
     bolt.execute(tuple);
 
     // a tuple should NOT be emitted for the downstream profile builder
-    verify(outputCollector, times(0)).emit(any(Values.class));
+    verify(outputCollector, times(0))
+            .emit(any());
 
     // the original tuple should be ack'd
-    verify(outputCollector, times(1)).ack(tuple);
+    verify(outputCollector, times(1))
+            .ack(eq(tuple));
   }
 
   /**
-   * The entity associated with a ProfileMeasurement can be defined using a variable that is resolved
-   * via Stella.  In this case the entity is defined as 'ip_src_addr' which is resolved to
-   * '10.0.0.1' based on the data contained within the message.
+   * The entity associated with a profile is defined with a Stellar expression.  That expression
+   * can refer to any field within the message.
+   *
+   * In this case the entity is defined as 'ip_src_addr' which is resolved to '10.0.0.1' based on
+   * the data contained within the message.
    */
   @Test
   public void testResolveEntityName() throws Exception {
 
-    // setup
-    ProfileSplitterBolt bolt = createBolt(onlyIfTrue);
-
-    // execute
+    ProfilerConfig config = toProfilerConfig(profileWithOnlyIfTrue);
+    ProfileSplitterBolt bolt = createBolt(config);
     bolt.execute(tuple);
 
-    // verify - the entity name comes from variable resolution in stella
+    // expected values
     String expectedEntity = "10.0.0.1";
-    verify(outputCollector, times(1)).emit(any(Tuple.class), refEq(new Values(expectedEntity, onlyIfTrue, message)));
+    ProfileConfig expectedConfig = config.getProfiles().get(0);
+    Values expected = new Values(message, timestamp, expectedEntity, expectedConfig);
+
+    // a tuple should be emitted for the downstream profile builder
+    verify(outputCollector, times(1))
+            .emit(eq(tuple), eq(expected));
+
+    // the original tuple should be ack'd
+    verify(outputCollector, times(1))
+            .ack(eq(tuple));
   }
 
   /**
@@ -232,11 +395,42 @@ public class ProfileSplitterBoltTest extends BaseBoltTest {
   @Test
   public void testOnlyIfInvalid() throws Exception {
 
-    // setup
-    ProfileSplitterBolt bolt = createBolt(onlyIfInvalid);
+    ProfilerConfig config = toProfilerConfig(profileWithOnlyIfInvalid);
+    ProfileSplitterBolt bolt = createBolt(config);
     bolt.execute(tuple);
 
     // a tuple should NOT be emitted for the downstream profile builder
-    verify(outputCollector, times(0)).emit(any(Values.class));
+    verify(outputCollector, times(0))
+            .emit(any(Values.class));
+  }
+
+  /**
+   * Creates a ProfilerConfig based on a string containing JSON.
+   *
+   * @param configAsJSON The config as JSON.
+   * @return The ProfilerConfig.
+   * @throws Exception If the JSON cannot be parsed.
+   */
+  private ProfilerConfig toProfilerConfig(String configAsJSON) throws Exception {
+    InputStream in = new ByteArrayInputStream(configAsJSON.getBytes("UTF-8"));
+    return JSONUtils.INSTANCE.load(in, ProfilerConfig.class);
   }
+
+  /**
+   * Creates a ProfileSplitterBolt to test.
+   *
+   * @param config The profiler configuration.
+   * @return A {@link ProfileSplitterBolt} to test.
+   */
+  private ProfileSplitterBolt createBolt(ProfilerConfig config) throws Exception {
+
+    ProfileSplitterBolt bolt = new ProfileSplitterBolt("zookeeperURL");
+    bolt.setCuratorFramework(client);
+    bolt.setZKCache(cache);
+    bolt.getConfigurations().updateProfilerConfig(config);
+    bolt.prepare(new HashMap<>(), topologyContext, outputCollector);
+
+    // set the clock factory AFTER calling prepare to use the fixed clock factory
+    bolt.setClockFactory(new FixedClockFactory(timestamp));
+
+    return bolt;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/integration/MessageBuilder.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/integration/MessageBuilder.java b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/integration/MessageBuilder.java
new file mode 100644
index 0000000..7e1628b
--- /dev/null
+++ b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/integration/MessageBuilder.java
@@ -0,0 +1,75 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.metron.profiler.integration;
+
+import org.json.simple.JSONObject;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Enables simple creation of telemetry messages for testing.
+ */
+public class MessageBuilder {
+
+  private Map<Object, Object> fields;
+
+  /**
+   * Create a new {@link MessageBuilder}.
+   */
+  public MessageBuilder() {
+    this.fields = new HashMap<>();
+  }
+
+  /**
+   * Adds all of the fields from a message to this message.
+   *
+   * @param prototype The other message that is treated as a prototype.
+   * @return A {@link MessageBuilder}
+   */
+  public MessageBuilder withFields(JSONObject prototype) {
+    prototype.forEach((key, val) -> this.fields.put(key, val));
+    return this;
+  }
+
+  /**
+   * Adds a field to the message.
+   *
+   * @param key The field name.
+   * @param value The field value.
+   * @return A {@link MessageBuilder}
+   */
+  public MessageBuilder withField(String key, Object value) {
+    this.fields.put(key, value);
+    return this;
+  }
+
+  /**
+   * Build the message.
+   *
+   * <p>This should be called after defining all of the message fields.
+   *
+   * @return The message as a {@link JSONObject}.
+   */
+  public JSONObject build() {
+    return new JSONObject(fields);
+  }
+}
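
A brief usage sketch for the new MessageBuilder; this mirrors the pattern
used by testEventTimeProcessing in the ProfilerIntegrationTest changes below:

    import org.apache.metron.profiler.integration.MessageBuilder;
    import org.json.simple.JSONObject;

    public class MessageBuilderExample {

      public static void main(String[] args) {
        // build a telemetry message carrying an entity and an event timestamp
        JSONObject message = new MessageBuilder()
                .withField("ip_src_addr", "10.0.0.1")
                .withField("timestamp", 10L)
                .build();

        // serialize it for writing to the profiler's input kafka topic
        System.out.println(message.toJSONString());
      }
    }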

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/integration/ProfilerIntegrationTest.java
----------------------------------------------------------------------
diff --git a/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/integration/ProfilerIntegrationTest.java b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/integration/ProfilerIntegrationTest.java
index 0d1b465..c48a3e9 100644
--- a/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/integration/ProfilerIntegrationTest.java
+++ b/metron-analytics/metron-profiler/src/test/java/org/apache/metron/profiler/integration/ProfilerIntegrationTest.java
@@ -28,15 +28,18 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.metron.common.Constants;
 import org.apache.metron.common.utils.SerDeUtils;
-import org.apache.metron.hbase.mock.MockHTable;
 import org.apache.metron.hbase.mock.MockHBaseTableProvider;
+import org.apache.metron.hbase.mock.MockHTable;
 import org.apache.metron.integration.BaseIntegrationTest;
 import org.apache.metron.integration.ComponentRunner;
 import org.apache.metron.integration.UnableToStartException;
 import org.apache.metron.integration.components.FluxTopologyComponent;
 import org.apache.metron.integration.components.KafkaComponent;
 import org.apache.metron.integration.components.ZKServerComponent;
+import org.apache.metron.profiler.ProfileMeasurement;
 import org.apache.metron.profiler.hbase.ColumnBuilder;
+import org.apache.metron.profiler.hbase.RowKeyBuilder;
+import org.apache.metron.profiler.hbase.SaltyRowKeyBuilder;
 import org.apache.metron.profiler.hbase.ValueOnlyColumnBuilder;
 import org.apache.metron.statistics.OnlineStatisticsProvider;
 import org.junit.After;
@@ -49,15 +52,15 @@ import org.junit.Test;
 import java.io.File;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
+import java.util.concurrent.TimeUnit;
 
 import static com.google.code.tempusfugit.temporal.Duration.seconds;
 import static com.google.code.tempusfugit.temporal.Timeout.timeout;
 import static com.google.code.tempusfugit.temporal.WaitFor.waitOrTimeout;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
 
 /**
  * An integration test of the Profiler topology.
@@ -105,7 +108,6 @@ public class ProfilerIntegrationTest extends BaseIntegrationTest {
   private static FluxTopologyComponent fluxComponent;
   private static KafkaComponent kafkaComponent;
   private static ConfigUploadComponent configUploadComponent;
-  private static List<byte[]> input;
   private static ComponentRunner runner;
   private static MockHTable profilerTable;
 
@@ -114,7 +116,13 @@ public class ProfilerIntegrationTest extends BaseIntegrationTest {
   private static final double epsilon = 0.001;
   private static final String inputTopic = Constants.INDEXING_TOPIC;
   private static final String outputTopic = "profiles";
+  private static final int saltDivisor = 10;
 
+  private static final long windowLagMillis = TimeUnit.SECONDS.toMillis(5);
+  private static final long windowDurationMillis = TimeUnit.SECONDS.toMillis(5);
+  private static final long periodDurationMillis = TimeUnit.SECONDS.toMillis(15);
+  private static final long profileTimeToLiveMillis = TimeUnit.SECONDS.toMillis(20);
+  private static final long maxRoutesPerBolt = 100000;
 
   /**
    * Tests the first example contained within the README.
@@ -122,22 +130,25 @@ public class ProfilerIntegrationTest extends BaseIntegrationTest {
   @Test
   public void testExample1() throws Exception {
 
-    update(TEST_RESOURCES + "/config/zookeeper/readme-example-1");
+    uploadConfig(TEST_RESOURCES + "/config/zookeeper/readme-example-1");
 
     // start the topology and write test messages to kafka
     fluxComponent.submitTopology();
-    kafkaComponent.writeMessages(inputTopic, input);
+    kafkaComponent.writeMessages(inputTopic, message1, message1, message1);
+    kafkaComponent.writeMessages(inputTopic, message2, message2, message2);
+    kafkaComponent.writeMessages(inputTopic, message3, message3, message3);
 
     // verify - ensure the profile is being persisted
     waitOrTimeout(() -> profilerTable.getPutLog().size() > 0,
-            timeout(seconds(90)));
+            timeout(seconds(180)));
 
     // verify - only 10.0.0.2 sends 'HTTP', thus there should be only 1 value
-    List<Double> actuals = read(profilerTable.getPutLog(), columnFamily, columnBuilder.getColumnQualifier("value"), Double.class);
+    List<Double> actuals = read(profilerTable.getPutLog(), columnFamily,
+            columnBuilder.getColumnQualifier("value"), Double.class);
 
-    // verify - there are 5 'HTTP' each with 390 bytes
+    // verify - there are 3 'HTTP' messages, each with 390 bytes
     Assert.assertTrue(actuals.stream().anyMatch(val ->
-            MathUtils.equals(390.0 * 5, val, epsilon)
+            MathUtils.equals(390.0 * 3, val, epsilon)
     ));
   }
 
@@ -147,11 +158,13 @@ public class ProfilerIntegrationTest extends BaseIntegrationTest {
   @Test
   public void testExample2() throws Exception {
 
-    update(TEST_RESOURCES + "/config/zookeeper/readme-example-2");
+    uploadConfig(TEST_RESOURCES + "/config/zookeeper/readme-example-2");
 
     // start the topology and write test messages to kafka
     fluxComponent.submitTopology();
-    kafkaComponent.writeMessages(inputTopic, input);
+    kafkaComponent.writeMessages(inputTopic, message1, message1, message1);
+    kafkaComponent.writeMessages(inputTopic, message2, message2, message2);
+    kafkaComponent.writeMessages(inputTopic, message3, message3, message3);
 
     // expect 2 values written by the profile; one for 10.0.0.2 and another for 10.0.0.3
     final int expected = 2;
@@ -161,16 +174,17 @@ public class ProfilerIntegrationTest extends BaseIntegrationTest {
             timeout(seconds(90)));
 
     // verify - expect 2 results as 2 hosts involved; 10.0.0.2 sends 'HTTP' and 10.0.0.3 sends 'DNS'
-    List<Double> actuals = read(profilerTable.getPutLog(), columnFamily, columnBuilder.getColumnQualifier("value"), Double.class);
+    List<Double> actuals = read(profilerTable.getPutLog(), columnFamily,
+            columnBuilder.getColumnQualifier("value"), Double.class);
 
-    // verify - 10.0.0.3 -> 1/6
-    Assert.assertTrue( "Could not find a value near 1/6. Actual values read are are: " + Joiner.on(",").join(actuals)
-                     , actuals.stream().anyMatch(val -> MathUtils.equals(val, 1.0/6.0, epsilon)
+    // verify - 10.0.0.3 -> 1/4
+    Assert.assertTrue("Could not find a value near 1/4. Actual values read are: " + Joiner.on(",").join(actuals),
+            actuals.stream().anyMatch(val -> MathUtils.equals(val, 1.0/4.0, epsilon)
     ));
 
-    // verify - 10.0.0.2 -> 6/1
-    Assert.assertTrue("Could not find a value near 6. Actual values read are are: " + Joiner.on(",").join(actuals)
-            ,actuals.stream().anyMatch(val -> MathUtils.equals(val, 6.0/1.0, epsilon)
+    // verify - 10.0.0.2 -> 4/1
+    Assert.assertTrue("Could not find a value near 4. Actual values read are: " + Joiner.on(",").join(actuals),
+            actuals.stream().anyMatch(val -> MathUtils.equals(val, 4.0/1.0, epsilon)
     ));
   }
 
@@ -180,22 +194,25 @@ public class ProfilerIntegrationTest extends BaseIntegrationTest {
   @Test
   public void testExample3() throws Exception {
 
-    update(TEST_RESOURCES + "/config/zookeeper/readme-example-3");
+    uploadConfig(TEST_RESOURCES + "/config/zookeeper/readme-example-3");
 
     // start the topology and write test messages to kafka
     fluxComponent.submitTopology();
-    kafkaComponent.writeMessages(inputTopic, input);
+    kafkaComponent.writeMessages(inputTopic, message1, message1, message1);
+    kafkaComponent.writeMessages(inputTopic, message2, message2, message2);
+    kafkaComponent.writeMessages(inputTopic, message3, message3, message3);
 
     // verify - ensure the profile is being persisted
     waitOrTimeout(() -> profilerTable.getPutLog().size() > 0,
             timeout(seconds(90)));
 
     // verify - only 10.0.0.2 sends 'HTTP', thus there should be only 1 value
-    List<Double> actuals = read(profilerTable.getPutLog(), columnFamily, columnBuilder.getColumnQualifier("value"), Double.class);
+    List<Double> actuals = read(profilerTable.getPutLog(), columnFamily,
+            columnBuilder.getColumnQualifier("value"), Double.class);
 
     // verify - there are 3 'HTTP' messages each with a length of 20, thus the average should be 20
-    Assert.assertTrue("Could not find a value near 20. Actual values read are are: " + Joiner.on(",").join(actuals)
-                     , actuals.stream().anyMatch(val -> MathUtils.equals(val, 20.0, epsilon)
+    Assert.assertTrue("Could not find a value near 20. Actual values read are: " + Joiner.on(",").join(actuals),
+            actuals.stream().anyMatch(val -> MathUtils.equals(val, 20.0, epsilon)
     ));
   }
 
@@ -205,11 +222,13 @@ public class ProfilerIntegrationTest extends BaseIntegrationTest {
   @Test
   public void testExample4() throws Exception {
 
-    update(TEST_RESOURCES + "/config/zookeeper/readme-example-4");
+    uploadConfig(TEST_RESOURCES + "/config/zookeeper/readme-example-4");
 
     // start the topology and write test messages to kafka
     fluxComponent.submitTopology();
-    kafkaComponent.writeMessages(inputTopic, input);
+    kafkaComponent.writeMessages(inputTopic, message1, message1, message1);
+    kafkaComponent.writeMessages(inputTopic, message2, message2, message2);
+    kafkaComponent.writeMessages(inputTopic, message3, message3, message3);
 
     // verify - ensure the profile is being persisted
     waitOrTimeout(() -> profilerTable.getPutLog().size() > 0,
@@ -220,34 +239,109 @@ public class ProfilerIntegrationTest extends BaseIntegrationTest {
     List<OnlineStatisticsProvider> actuals = read(profilerTable.getPutLog(), columnFamily, column, OnlineStatisticsProvider.class);
 
     // verify - there are 3 'HTTP' messages each with a length of 20, thus the average should be 20
-    Assert.assertTrue("Could not find a value near 20. Actual values read are are: " + Joiner.on(",").join(actuals)
-                     , actuals.stream().anyMatch(val -> MathUtils.equals(val.getMean(), 20.0, epsilon)
+    Assert.assertTrue("Could not find a value near 20. Actual values read are: " + Joiner.on(",").join(actuals),
+            actuals.stream().anyMatch(val -> MathUtils.equals(val.getMean(), 20.0, epsilon)
     ));
   }
 
   @Test
   public void testPercentiles() throws Exception {
 
-    update(TEST_RESOURCES + "/config/zookeeper/percentiles");
+    uploadConfig(TEST_RESOURCES + "/config/zookeeper/percentiles");
+
+    // start the topology and write test messages to kafka
+    fluxComponent.submitTopology();
+    kafkaComponent.writeMessages(inputTopic, message1, message1, message1);
+    kafkaComponent.writeMessages(inputTopic, message2, message2, message2);
+    kafkaComponent.writeMessages(inputTopic, message3, message3, message3);
+
+    // verify - ensure the profile is being persisted
+    waitOrTimeout(() -> profilerTable.getPutLog().size() > 0,
+            timeout(seconds(90)));
+
+    List<Double> actuals = read(profilerTable.getPutLog(), columnFamily,
+            columnBuilder.getColumnQualifier("value"), Double.class);
 
+    // verify - the 70th percentile of 3 x 20s = 20.0
+    Assert.assertTrue("Could not find a value near 20. Actual values read are: " + Joiner.on(",").join(actuals),
+            actuals.stream().anyMatch(val -> MathUtils.equals(val, 20.0, epsilon)));
+  }
+
+  /**
+   * The Profiler can optionally perform event time processing.  With event time processing,
+   * the Profiler uses timestamps contained in the source telemetry.
+   *
+   * <p>Defining a 'timestampField' within the Profiler configuration tells the Profiler
+   * from which field the timestamp should be extracted.
+   */
+  @Test
+  public void testEventTimeProcessing() throws Exception {
+
+    // constants used for the test
+    final long startAt = 10;
+    final String entity = "10.0.0.1";
+    final String profileName = "event-time-test";
+
+    // create some messages that contain a timestamp - a really old one, close to 1970
+    String message1 = new MessageBuilder()
+            .withField("ip_src_addr", entity)
+            .withField("timestamp", startAt)
+            .build()
+            .toJSONString();
+
+    String message2 = new MessageBuilder()
+            .withField("ip_src_addr", entity)
+            .withField("timestamp", startAt + 100)
+            .build()
+            .toJSONString();
+
+    uploadConfig(TEST_RESOURCES + "/config/zookeeper/event-time-test");
 
     // start the topology and write test messages to kafka
     fluxComponent.submitTopology();
-    kafkaComponent.writeMessages(inputTopic, input);
+    kafkaComponent.writeMessages(inputTopic, message1, message2);
 
     // verify - ensure the profile is being persisted
     waitOrTimeout(() -> profilerTable.getPutLog().size() > 0,
             timeout(seconds(90)));
 
-    List<Double> actuals = read(profilerTable.getPutLog(), columnFamily, columnBuilder.getColumnQualifier("value"), Double.class);
+    List<Put> puts = profilerTable.getPutLog();
+    assertEquals(1, puts.size());
+
+    // inspect the row key to ensure the profiler used event time correctly.  the timestamp
+    // embedded in the row key should match the timestamps in the source telemetry
+    byte[] expectedRowKey = generateExpectedRowKey(profileName, entity, startAt);
+    byte[] actualRowKey = puts.get(0).getRow();
+    String msg = String.format("expected '%s', got '%s'",
+            new String(expectedRowKey, "UTF-8"),
+            new String(actualRowKey, "UTF-8"));
+    assertArrayEquals(msg, expectedRowKey, actualRowKey);
+  }
 
-    // verify - the 70th percentile of 5 x 20s = 20.0
-    Assert.assertTrue("Could not find a value near 20. Actual values read are are: " + Joiner.on(",").join(actuals)
-                     , actuals.stream().anyMatch(val -> MathUtils.equals(val, 20.0, epsilon)));
+  /**
+   * Generates the expected row key.
+   *
+   * @param profileName The name of the profile.
+   * @param entity The entity.
+   * @param whenMillis A timestamp in epoch milliseconds.
+   * @return A row key.
+   */
+  private byte[] generateExpectedRowKey(String profileName, String entity, long whenMillis) {
+
+    // only the profile name, entity, and period are used to generate the row key
+    ProfileMeasurement measurement = new ProfileMeasurement()
+            .withProfileName(profileName)
+            .withEntity(entity)
+            .withPeriod(whenMillis, periodDurationMillis, TimeUnit.MILLISECONDS);
+
+    // build the row key
+    RowKeyBuilder rowKeyBuilder = new SaltyRowKeyBuilder(saltDivisor, periodDurationMillis, TimeUnit.MILLISECONDS);
+    return rowKeyBuilder.rowKey(measurement);
   }
 
   /**
    * Reads a value written by the Profiler.
+   *
    * @param family The column family.
    * @param qualifier The column qualifier.
    * @param clazz The expected type of the value.
@@ -258,7 +352,8 @@ public class ProfilerIntegrationTest extends BaseIntegrationTest {
     List<T> results = new ArrayList<>();
 
     for(Put put: puts) {
-      for(Cell cell: put.get(Bytes.toBytes(family), qualifier)) {
+      List<Cell> cells = put.get(Bytes.toBytes(family), qualifier);
+      for(Cell cell : cells) {
         T value = SerDeUtils.fromBytes(cell.getValue(), clazz);
         results.add(value);
       }
@@ -271,39 +366,41 @@ public class ProfilerIntegrationTest extends BaseIntegrationTest {
   public static void setupBeforeClass() throws UnableToStartException {
     columnBuilder = new ValueOnlyColumnBuilder(columnFamily);
 
-    List<String> inputNew = Stream.of(message1, message2, message3)
-        .map(m -> Collections.nCopies(5, m))
-        .flatMap(l -> l.stream())
-        .collect(Collectors.toList());
-
-    // create input messages for the profiler to consume
-    input = Stream.of(message1, message2, message3)
-            .map(Bytes::toBytes)
-            .map(m -> Collections.nCopies(5, m))
-            .flatMap(l -> l.stream())
-            .collect(Collectors.toList());
-
     // storm topology properties
     final Properties topologyProperties = new Properties() {{
-      setProperty("kafka.start", "UNCOMMITTED_EARLIEST");
+
+      // storm settings
       setProperty("profiler.workers", "1");
       setProperty("profiler.executors", "0");
+      setProperty("storm.auto.credentials", "[]");
+      setProperty("topology.auto-credentials", "[]");
+      setProperty("topology.message.timeout.secs", "60");
+      setProperty("topology.max.spout.pending", "100000");
+
+      // kafka settings
       setProperty("profiler.input.topic", inputTopic);
       setProperty("profiler.output.topic", outputTopic);
-      setProperty("profiler.period.duration", "20");
-      setProperty("profiler.period.duration.units", "SECONDS");
-      setProperty("profiler.ttl", "30");
-      setProperty("profiler.ttl.units", "MINUTES");
-      setProperty("profiler.hbase.salt.divisor", "10");
+      setProperty("kafka.start", "UNCOMMITTED_EARLIEST");
+      setProperty("kafka.security.protocol", "PLAINTEXT");
+
+      // hbase settings
+      setProperty("profiler.hbase.salt.divisor", Integer.toString(saltDivisor));
       setProperty("profiler.hbase.table", tableName);
       setProperty("profiler.hbase.column.family", columnFamily);
       setProperty("profiler.hbase.batch", "10");
       setProperty("profiler.hbase.flush.interval.seconds", "1");
-      setProperty("profiler.profile.ttl", "20");
       setProperty("hbase.provider.impl", "" + MockHBaseTableProvider.class.getName());
-      setProperty("storm.auto.credentials", "[]");
-      setProperty("kafka.security.protocol", "PLAINTEXT");
-      setProperty("topology.auto-credentials", "[]");
+
+      // profile settings
+      setProperty("profiler.period.duration", Long.toString(periodDurationMillis));
+      setProperty("profiler.period.duration.units", "MILLISECONDS");
+      setProperty("profiler.ttl", Long.toString(profileTimeToLiveMillis));
+      setProperty("profiler.ttl.units", "MILLISECONDS");
+      setProperty("profiler.window.duration", Long.toString(windowDurationMillis));
+      setProperty("profiler.window.duration.units", "MILLISECONDS");
+      setProperty("profiler.window.lag", Long.toString(windowLagMillis));
+      setProperty("profiler.window.lag.units", "MILLISECONDS");
+      setProperty("profiler.max.routes.per.bolt", Long.toString(maxRoutesPerBolt));
     }};
 
     // create the mock table
@@ -311,7 +408,7 @@ public class ProfilerIntegrationTest extends BaseIntegrationTest {
 
     zkComponent = getZKServerComponent(topologyProperties);
 
-    // create the input topic
+    // create the input and output topics
     kafkaComponent = getKafkaComponent(topologyProperties, Arrays.asList(
             new KafkaComponent.Topic(inputTopic, 1),
             new KafkaComponent.Topic(outputTopic, 1)));
@@ -340,12 +437,6 @@ public class ProfilerIntegrationTest extends BaseIntegrationTest {
     runner.start();
   }
 
-  public void update(String path) throws Exception {
-    configUploadComponent.withGlobalConfiguration(path)
-        .withProfilerConfiguration(path);
-    configUploadComponent.update();
-  }
-
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
     MockHBaseTableProvider.clear();
@@ -368,4 +459,16 @@ public class ProfilerIntegrationTest extends BaseIntegrationTest {
       runner.reset();
     }
   }
-}
\ No newline at end of file
+
+  /**
+   * Uploads config values to Zookeeper.
+   * @param path The path on the local filesystem to the config values.
+   * @throws Exception If the config cannot be uploaded.
+   */
+  public void uploadConfig(String path) throws Exception {
+    configUploadComponent
+            .withGlobalConfiguration(path)
+            .withProfilerConfiguration(path)
+            .update();
+  }
+}
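
Note: the 'event-time-test' configuration uploaded by the test above is not
included in this diff. Judging from the 'timestampField' examples in
ProfileSplitterBoltTest, it presumably resembles the following; the init,
update, and result expressions are illustrative guesses, only the profile
name and the timestamp field come from the test itself:

    {
      "profiles": [
        {
          "profile": "event-time-test",
          "foreach": "ip_src_addr",
          "init":   { "count": "0" },
          "update": { "count": "count + 1" },
          "result": "count"
        }
      ],
      "timestampField": "timestamp"
    }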

http://git-wip-us.apache.org/repos/asf/metron/blob/3083b471/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-profiler-env.xml
----------------------------------------------------------------------
diff --git a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-profiler-env.xml b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-profiler-env.xml
index c7f6ce2..8546b56 100644
--- a/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-profiler-env.xml
+++ b/metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/configuration/metron-profiler-env.xml
@@ -57,9 +57,33 @@
       <type>value-list</type>
       <entries>
         <entry>
-          <value>DAYS</value>
+          <value>HOURS</value>
         </entry>
         <entry>
+          <value>MINUTES</value>
+        </entry>
+        <entry>
+          <value>SECONDS</value>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+  <property>
+    <name>profiler_window_duration</name>
+    <value>30</value>
+    <description>The duration of each profile window. This value should be defined along with profiler.window.duration.units</description>
+    <display-name>Window Duration</display-name>
+  </property>
+  <property>
+    <name>profiler_window_units</name>
+    <value>SECONDS</value>
+    <description>The units used to specify the profiler.window.duration. This value should be defined along with profiler.window.duration.</description>
+    <display-name>Window Units</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
           <value>HOURS</value>
         </entry>
         <entry>
@@ -71,7 +95,6 @@
       </entries>
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
-
   </property>
   <property>
     <name>profiler_ttl</name>
@@ -104,8 +127,54 @@
       </entries>
       <selection-cardinality>1</selection-cardinality>
     </value-attributes>
-
-
+  </property>
+  <property>
+    <name>profiler_window_lag</name>
+    <value>1</value>
+    <description>The maximum time lag for timestamps. Timestamps cannot arrive out-of-order by more than this amount.</description>
+    <display-name>Window Time Lag</display-name>
+  </property>
+  <property>
+    <name>profiler_window_lag_units</name>
+    <value>MINUTES</value>
+    <description>The units used to specify the Window Time Lag.</description>
+    <display-name>Window Lag Units</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>HOURS</value>
+        </entry>
+        <entry>
+          <value>MINUTES</value>
+        </entry>
+        <entry>
+          <value>SECONDS</value>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+  <property>
+    <name>profiler_topology_message_timeout_secs</name>
+    <description>The maximum amount of time a message has to complete before it is considered failed.</description>
+    <display-name>Profiler Topology Message Timeout</display-name>
+    <value>900</value>
+  </property>
+  <property>
+    <name>profiler_topology_max_spout_pending</name>
+    <description>The maximum number of unacked tuples allowed to be pending on the profiler topology spout.</description>
+    <display-name>Spout Max Pending Tuples</display-name>
+    <value/>
+    <value-attributes>
+        <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>profiler_max_routes_per_bolt</name>
+    <value>100000</value>
+    <description>The max number of routes allowed per bolt. The number of routes increases as the number of profiles and entities increases.</description>
+    <display-name>Max Routes Per Bolt</display-name>
   </property>
   <property>
     <name>profiler_hbase_table</name>
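
These new Ambari properties line up with the profiler topology properties
exercised in the ProfilerIntegrationTest changes above. The mapping below is
inferred from the matching names; it is a sketch, not the mpack template
itself:

    profiler_window_duration / profiler_window_units  -> profiler.window.duration / profiler.window.duration.units
    profiler_window_lag / profiler_window_lag_units   -> profiler.window.lag / profiler.window.lag.units
    profiler_max_routes_per_bolt                      -> profiler.max.routes.per.bolt
    profiler_topology_message_timeout_secs            -> topology.message.timeout.secs
    profiler_topology_max_spout_pending               -> topology.max.spout.pending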


[03/50] [abbrv] metron git commit: METRON-1485 Upgrade vagrant for dev environments closes apache/incubator-metron#959

Posted by rm...@apache.org.
METRON-1485 Upgrade vagrant for dev environments closes apache/incubator-metron#959


Project: http://git-wip-us.apache.org/repos/asf/metron/repo
Commit: http://git-wip-us.apache.org/repos/asf/metron/commit/e69ce213
Tree: http://git-wip-us.apache.org/repos/asf/metron/tree/e69ce213
Diff: http://git-wip-us.apache.org/repos/asf/metron/diff/e69ce213

Branch: refs/heads/feature/METRON-1416-upgrade-solr
Commit: e69ce213aaad84fd49d972c329a4bf4bf7d4b2ad
Parents: 26c5d30
Author: JonZeolla <ze...@gmail.com>
Authored: Thu Mar 15 14:09:28 2018 -0400
Committer: cstella <ce...@gmail.com>
Committed: Thu Mar 15 14:09:28 2018 -0400

----------------------------------------------------------------------
 metron-deployment/development/centos6/README.md  | 2 +-
 metron-deployment/development/ubuntu14/README.md | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/metron/blob/e69ce213/metron-deployment/development/centos6/README.md
----------------------------------------------------------------------
diff --git a/metron-deployment/development/centos6/README.md b/metron-deployment/development/centos6/README.md
index 5132c30..bd8553c 100644
--- a/metron-deployment/development/centos6/README.md
+++ b/metron-deployment/development/centos6/README.md
@@ -31,7 +31,7 @@ The computer used to deploy Apache Metron will need to have the following compon
 
  - [Ansible](https://github.com/ansible/ansible) (2.0.0.2 or 2.2.2.0)
  - [Docker](https://www.docker.com/community-edition)
- - [Vagrant](https://www.vagrantup.com) 1.8+
+ - [Vagrant](https://www.vagrantup.com) 2.0+
  - [Vagrant Hostmanager Plugin](https://github.com/devopsgroup-io/vagrant-hostmanager)
  - [Virtualbox](https://virtualbox.org) 5.0+
  - Python 2.7

http://git-wip-us.apache.org/repos/asf/metron/blob/e69ce213/metron-deployment/development/ubuntu14/README.md
----------------------------------------------------------------------
diff --git a/metron-deployment/development/ubuntu14/README.md b/metron-deployment/development/ubuntu14/README.md
index af7d3a3..5856911 100644
--- a/metron-deployment/development/ubuntu14/README.md
+++ b/metron-deployment/development/ubuntu14/README.md
@@ -31,7 +31,7 @@ The computer used to deploy Apache Metron will need to have the following compon
 
  - [Ansible](https://github.com/ansible/ansible) (2.0.0.2 or 2.2.2.0)
  - [Docker](https://www.docker.com/community-edition)
- - [Vagrant](https://www.vagrantup.com) 1.8+
+ - [Vagrant](https://www.vagrantup.com) 2.0+
  - [Vagrant Hostmanager Plugin](https://github.com/devopsgroup-io/vagrant-hostmanager)
  - [Virtualbox](https://virtualbox.org) 5.0+
  - Python 2.7