Posted to commits@eagle.apache.org by ha...@apache.org on 2016/12/30 08:28:04 UTC

[12/14] eagle git commit: [MINOR] Migrate 0.5.0-incubating-SNAPSHOT to 0.5.0-SNAPSHOT

http://git-wip-us.apache.org/repos/asf/eagle/blob/8b3729f9/eagle-assembly/src/main/docs/kafka.rb
----------------------------------------------------------------------
diff --git a/eagle-assembly/src/main/docs/kafka.rb b/eagle-assembly/src/main/docs/kafka.rb
deleted file mode 100644
index 4c5f9b3..0000000
--- a/eagle-assembly/src/main/docs/kafka.rb
+++ /dev/null
@@ -1,191 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-require 'logstash/namespace'
-require 'logstash/outputs/base'
-require 'jruby-kafka'
-
-# Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on
-# the broker.
-#
-# The only required configuration is the topic name. The default codec is json,
-# so events will be persisted on the broker in json format. If you select the plain codec,
-# Logstash will encode your messages with not only the message itself but also a timestamp and
-# the hostname. If you want nothing but your message to pass through, you should make the output
-# configuration something like:
-# [source,ruby]
-#     output {
-#       kafka {
-#         codec => plain {
-#            format => "%{message}"
-#         }
-#       }
-#     }
-# For more information see http://kafka.apache.org/documentation.html#theproducer
-#
-# Kafka producer configuration: http://kafka.apache.org/documentation.html#producerconfigs
-class LogStash::Outputs::Kafka < LogStash::Outputs::Base
-  config_name 'kafka'
-  milestone 2
-
-  default :codec, 'json'
-  # This is for bootstrapping and the producer will only use it for getting metadata (topics,
-  # partitions and replicas). The socket connections for sending the actual data will be
-  # established based on the broker information returned in the metadata. The format is
-  # `host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a
-  # subset of brokers.
-  config :broker_list, :validate => :string, :default => 'localhost:9092'
-  # The topic to produce the messages to
-  config :topic_id, :validate => :string, :required => true
-  # This parameter allows you to specify the compression codec for all data generated by this
-  # producer. Valid values are `none`, `gzip` and `snappy`.
-  config :compression_codec, :validate => %w( none gzip snappy ), :default => 'none'
-  # This parameter allows you to set whether compression should be turned on for particular
-  # topics. If the compression codec is anything other than `NoCompressionCodec`,
-  # compression is enabled only for the topics listed here, if any. If the list of compressed
-  # topics is empty, the specified compression codec is enabled for all topics. If the
-  # compression codec is `NoCompressionCodec`, compression is disabled for all topics.
-  config :compressed_topics, :validate => :string, :default => ''
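-  # For example (illustrative values), to gzip-compress only the audit log topic:
-  # [source,ruby]
-  #     compression_codec => "gzip"
-  #     compressed_topics => "hdfs_audit_log"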
-  # This value controls when a produce request is considered completed. Specifically,
-  # how many other brokers must have committed the data to their log and acknowledged this to the
-  # leader. For more info, see http://kafka.apache.org/documentation.html#producerconfigs
-  config :request_required_acks, :validate => [-1,0,1], :default => 0
-  # The serializer class for messages. The default encoder takes a byte[] and returns the same byte[]
-  config :serializer_class, :validate => :string, :default => 'kafka.serializer.StringEncoder'
-  # The partitioner class for partitioning messages amongst partitions in the topic. The default
-  # partitioner is based on the hash of the key. If the key is null,
-  # the message is sent to a random partition in the broker.
-  # NOTE: `topic_metadata_refresh_interval_ms` controls how long the producer will stick with a
-  # partition in the topic. This defaults to 10 minutes, so the producer will continue to write to a
-  # single partition for 10 minutes before it switches.
-  config :partitioner_class, :validate => :string, :default => 'kafka.producer.DefaultPartitioner'
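-  # To make the hash-based default partitioner described above concrete, here is a
-  # minimal, hypothetical Ruby sketch (Ruby's #hash stands in for Java's hashCode,
-  # which `kafka.producer.DefaultPartitioner` applies as abs(hashCode) % numPartitions):
-  # [source,ruby]
-  #     def choose_partition(key, num_partitions)
-  #       return rand(num_partitions) if key.nil?  # null key: random partition
-  #       key.hash.abs % num_partitions            # keyed message: stable partition
-  #     end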
-  # The amount of time the broker will wait trying to meet the `request.required.acks` requirement
-  # before sending back an error to the client.
-  config :request_timeout_ms, :validate => :number, :default => 10000
-  # This parameter specifies whether the messages are sent asynchronously in a background thread.
-  # Valid values are (1) async for asynchronous send and (2) sync for synchronous send. By
-  # setting the producer to async we allow batching together of requests (which is great for
-  # throughput) but open the possibility of a failure of the client machine dropping unsent data.
-  config :producer_type, :validate => %w( sync async ), :default => 'sync'
-  # The serializer class for keys (defaults to the same as for messages if nothing is given)
-  config :key_serializer_class, :validate => :string, :default => 'kafka.serializer.StringEncoder'
-  # This property will cause the producer to automatically retry a failed send request. This
-  # property specifies the number of retries when such failures occur. Note that setting a
-  # non-zero value here can lead to duplicates in the case of network errors that cause a message
-  # to be sent but the acknowledgement to be lost.
-  config :message_send_max_retries, :validate => :number, :default => 3
-  # Before each retry, the producer refreshes the metadata of relevant topics to see if a new
-  # leader has been elected. Since leader election takes a bit of time,
-  # this property specifies the amount of time that the producer waits before refreshing the
-  # metadata.
-  config :retry_backoff_ms, :validate => :number, :default => 100
-  # The producer generally refreshes the topic metadata from brokers when there is a failure
-  # (partition missing, leader not available...). It will also poll regularly (default: every
-  # 10 min, i.e. 600000 ms). If you set this to a negative value, metadata will only get refreshed on
-  # failure. If you set this to zero, the metadata will get refreshed after each message sent
-  # (not recommended). Important note: the refresh happens only AFTER the message is sent,
-  # so if the producer never sends a message, the metadata is never refreshed.
-  config :topic_metadata_refresh_interval_ms, :validate => :number, :default => 600 * 1000
-  # Maximum time to buffer data when using async mode. For example a setting of 100 will try to
-  # batch together 100ms of messages to send at once. This will improve throughput but adds
-  # message delivery latency due to the buffering.
-  config :queue_buffering_max_ms, :validate => :number, :default => 5000
-  # The maximum number of unsent messages that can be queued up by the producer when using async
-  # mode before either the producer must be blocked or data must be dropped.
-  config :queue_buffering_max_messages, :validate => :number, :default => 10000
-  # The amount of time to block before dropping messages when running in async mode and the
-  # buffer has reached `queue.buffering.max.messages`. If set to 0 events will be enqueued
-  # immediately or dropped if the queue is full (the producer send call will never block). If set
-  # to -1 the producer will block indefinitely and never willingly drop a send.
-  config :queue_enqueue_timeout_ms, :validate => :number, :default => -1
-  # The number of messages to send in one batch when using async mode. The producer will wait
-  # until either this number of messages are ready to send or `queue.buffering.max.ms` is reached.
-  config :batch_num_messages, :validate => :number, :default => 200
-  # Socket write buffer size
-  config :send_buffer_bytes, :validate => :number, :default => 100 * 1024
-  # The client id is a user-specified string sent in each request to help trace calls. It should
-  # logically identify the application making the request.
-  config :client_id, :validate => :string, :default => ''
-  # Provides a way to specify a partition key as a string. To specify a partition key for
-  # Kafka, configure a format that will produce the key as a string. Defaults
-  # `key_serializer_class` to `kafka.serializer.StringEncoder` to match. For example, to partition
-  # by host:
-  # [source,ruby]
-  #     output {
-  #       kafka {
-  #           partition_key_format => "%{host}"
-  #       }
-  #     }
-  config :partition_key_format, :validate => :string, :default => nil
-
-  public
-  def register
-    LogStash::Logger.setup_log4j(@logger)
-
-    options = {
-        :broker_list => @broker_list,
-        :compression_codec => @compression_codec,
-        :compressed_topics => @compressed_topics,
-        :request_required_acks => @request_required_acks,
-        :serializer_class => @serializer_class,
-        :partitioner_class => @partitioner_class,
-        :request_timeout_ms => @request_timeout_ms,
-        :producer_type => @producer_type,
-        :key_serializer_class => @key_serializer_class,
-        :message_send_max_retries => @message_send_max_retries,
-        :retry_backoff_ms => @retry_backoff_ms,
-        :topic_metadata_refresh_interval_ms => @topic_metadata_refresh_interval_ms,
-        :queue_buffering_max_ms => @queue_buffering_max_ms,
-        :queue_buffering_max_messages => @queue_buffering_max_messages,
-        :queue_enqueue_timeout_ms => @queue_enqueue_timeout_ms,
-        :batch_num_messages => @batch_num_messages,
-        :send_buffer_bytes => @send_buffer_bytes,
-        :client_id => @client_id
-    }
-    @producer = Kafka::Producer.new(options)
-    @producer.connect
-
-    @logger.info('Registering kafka producer', :topic_id => @topic_id, :broker_list => @broker_list)
-
-    @codec.on_event do |data|
-      begin
-        @producer.send_msg(@current_topic_id,@partition_key,data)
-      rescue LogStash::ShutdownSignal
-        @logger.info('Kafka producer got shutdown signal')
-      rescue => e
-        @logger.warn('kafka producer threw exception, restarting',
-                     :exception => e)
-      end
-    end
-  end # def register
-
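-  # Resolve the topic and partition key per event (both may contain %{field}
-  # references), then hand the event to the codec; the on_event callback
-  # registered above performs the actual send.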
-  def receive(event)
-    return unless output?(event)
-    if event == LogStash::SHUTDOWN
-      finished
-      return
-    end
-    @partition_key = if @partition_key_format.nil? then nil else event.sprintf(@partition_key_format) end
-    @current_topic_id = if @topic_id.nil? then nil else event.sprintf(@topic_id) end
-    @codec.encode(event)
-    @partition_key = nil
-    @current_topic_id = nil
-  end
-
-  def teardown
-    @producer.close
-  end
-end #class LogStash::Outputs::Kafka
-

http://git-wip-us.apache.org/repos/asf/eagle/blob/8b3729f9/eagle-assembly/src/main/docs/logstash-kafka-conf.md
----------------------------------------------------------------------
diff --git a/eagle-assembly/src/main/docs/logstash-kafka-conf.md b/eagle-assembly/src/main/docs/logstash-kafka-conf.md
deleted file mode 100644
index 9003fb4..0000000
--- a/eagle-assembly/src/main/docs/logstash-kafka-conf.md
+++ /dev/null
@@ -1,207 +0,0 @@
-<!--
-{% comment %}
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-{% endcomment %}
--->
-
-# Logstash-kafka 
-
-### Install logstash-kafka plugin
-
-
-#### For Logstash 1.5.x, 2.x
-
-
-logstash-kafka has been integrated into [logstash-input-kafka][logstash-input-kafka] and [logstash-output-kafka][logstash-output-kafka], so you can use it directly.
-
-[logstash-input-kafka]: https://github.com/logstash-plugins/logstash-input-kafka
-[logstash-output-kafka]: https://github.com/logstash-plugins/logstash-output-kafka
-
-#### For Logstash 1.4.x
-
-In Logstash 1.4.x, the released version does not support specifying a partition\_key for the Kafka producer, so data is produced to each partition in turn. For Eagle, we need to use the user field of hdfs\_audit\_log as the partition key, so some hacking work has been done. If you have the same requirement, you can follow it.
-
-1. Install logstash-kafka
-
-        cd /path/to/logstash
-        GEM_HOME=vendor/bundle/jruby/1.9 GEM_PATH= java -jar vendor/jar/jruby-complete-1.7.11.jar -S gem install logstash-kafka
-        cp -R vendor/bundle/jruby/1.9/gems/logstash-kafka-*-java/{lib/logstash/*,spec/*} {lib/logstash/,spec/}
-        # test install
-        USE_JRUBY=1 bin/logstash rspec spec/**/kafka*.rb
-
-    or
-
-        cd /path/to/logstash-kafka
-        make tarball
-        <!-- a tarball package will be generated under build, including logstash -->
-
-2. Hacking the kafka.rb
-
-   We have added partition\_key\_format, the option used to specify the partition\_key (also supported by Logstash 1.5.x), to lib/logstash/outputs/kafka.rb. More details are shown [here](https://github.xyz.com/eagle/eagle/blob/master/eagle-assembly/src/main/docs/kafka.rb).
-
-          config :partition_key_format, :validate => :string, :default => nil
-
-          public
-          def register
-            LogStash::Logger.setup_log4j(@logger)
-
-            options = {
-                :broker_list => @broker_list,
-                :compression_codec => @compression_codec,
-                :compressed_topics => @compressed_topics,
-                :request_required_acks => @request_required_acks,
-                :serializer_class => @serializer_class,
-                :partitioner_class => @partitioner_class,
-                :request_timeout_ms => @request_timeout_ms,
-                :producer_type => @producer_type,
-                :key_serializer_class => @key_serializer_class,
-                :message_send_max_retries => @message_send_max_retries,
-                :retry_backoff_ms => @retry_backoff_ms,
-                :topic_metadata_refresh_interval_ms => @topic_metadata_refresh_interval_ms,
-                :queue_buffering_max_ms => @queue_buffering_max_ms,
-                :queue_buffering_max_messages => @queue_buffering_max_messages,
-                :queue_enqueue_timeout_ms => @queue_enqueue_timeout_ms,
-                :batch_num_messages => @batch_num_messages,
-                :send_buffer_bytes => @send_buffer_bytes,
-                :client_id => @client_id
-            }
-            @producer = Kafka::Producer.new(options)
-            @producer.connect
-
-            @logger.info('Registering kafka producer', :topic_id => @topic_id, :broker_list => @broker_list)
-
-            @codec.on_event do |data|
-              begin
-                @producer.send_msg(@current_topic_id,@partition_key,data)
-              rescue LogStash::ShutdownSignal
-                @logger.info('Kafka producer got shutdown signal')
-              rescue => e
-                @logger.warn('kafka producer threw exception, restarting',
-                             :exception => e)
-              end
-            end
-          end # def register
-
-          def receive(event)
-            return unless output?(event)
-            if event == LogStash::SHUTDOWN
-              finished
-              return
-            end
-            @partition_key = if @partition_key_format.nil? then nil else event.sprintf(@partition_key_format) end
-            @current_topic_id = if @topic_id.nil? then nil else event.sprintf(@topic_id) end
-            @codec.encode(event)
-            @partition_key = nil
-            @current_topic_id = nil
-          end
-
-
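-   The receive method above resolves partition\_key\_format per event with event.sprintf. As a rough, hypothetical standalone approximation of the %{field} substitution it performs (LogStash::Event#sprintf also handles timestamps and nested field references):
-
-          # hypothetical mimic of the %{field} substitution behind partition_key_format
-          def sprintf_like(format, event)
-            format.gsub(/%\{(\w+)\}/) { event[$1].to_s }
-          end
-
-          event = { 'user' => 'hdfs' }
-          sprintf_like('%{user}', event)  # => "hdfs" becomes the partition key
-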
-### Create logstash configuration file
-Go to the Logstash root directory and create a configuration file.
-The 2.0 release of Logstash includes a new version of the Kafka output plugin with significant configuration changes. For more details, please check the documentation pages for the [Logstash 1.5](https://www.elastic.co/guide/en/logstash/1.5/plugins-outputs-kafka.html) and [Logstash 2.0](https://www.elastic.co/guide/en/logstash/2.0/plugins-outputs-kafka.html) versions of the kafka output plugin.
-
-#### For Logstash 1.4.X, 1.5.X
-
-        input {
-            file {
-                type => "hdp-nn-audit"
-                path => "/path/to/audit.log"
-                start_position => end
-                sincedb_path => "/var/log/logstash/"
-             }
-        }
-
-        filter{
-            if [type] == "hdp-nn-audit" {
-        	   grok {
-        	       match => ["message", "ugi=(?<user>([\w\d\-]+))@|ugi=(?<user>([\w\d\-]+))/[\w\d\-.]+@|ugi=(?<user>([\w\d.\-_]+))[\s(]+"]
-        	   }
-            }
-        }
-
-        output {
-            if [type] == "hdp-nn-audit" {
-                kafka {
-                    codec => plain {
-                        format => "%{message}"
-                    }
-                    broker_list => "localhost:9092"
-                    topic_id => "hdfs_audit_log"
-                    request_required_acks => 0
-                    request_timeout_ms => 10000
-                    producer_type => "async"
-                    message_send_max_retries => 3
-                    retry_backoff_ms => 100
-                    queue_buffering_max_ms => 5000
-                    queue_enqueue_timeout_ms => 5000
-                    batch_num_messages => 200
-                    send_buffer_bytes => 102400
-                    client_id => "hdp-nn-audit"
-                    partition_key_format => "%{user}"
-                }
-                # stdout { codec => rubydebug }
-            }
-        }
-
-#### For Logstash 2.X
-
-		input {
-			file {
-				type => "hdp-nn-audit"
-				path => "/path/to/audit.log"
-				start_position => end
-				sincedb_path => "/var/log/logstash/"
-			}
-		}
-
-
-		filter{
-			if [type] == "hdp-nn-audit" {
-			  grok {
-				  match => ["message", "ugi=(?<user>([\w\d\-]+))@|ugi=(?<user>([\w\d\-]+))/[\w\d\-.]+@|ugi=(?<user>([\w\d.\-_]+))[\s(]+"]
-			  }
-			}
-		}
-
-		output {
-			 if [type] == "hdp-nn-audit" {
-				  kafka {
-					  codec => plain {
-						  format => "%{message}"
-					  }
-					  bootstrap_servers => "localhost:9092"
-					  topic_id => "hdfs_audit_log"
-					  acks => "0"
-					  timeout_ms => 10000
-					  retries => 3
-					  retry_backoff_ms => 100
-					  batch_size => 16384
-					  send_buffer_bytes => 131072
-					  client_id => "hdp-nn-audit"
-				  }
-				  # stdout { codec => rubydebug }
-			  }
-		}
-
-#### grok pattern testing
-We have 3 typical patterns for the ugi field, matched by the three alternatives in the grok expression above; two sample audit lines follow, with a quick Ruby check of the pattern after them:
-2015-02-11 15:00:00,000 INFO FSNamesystem.audit: allowed=true	ugi=user1@xyz.com (auth:TOKEN)	ip=/10.115.44.55	cmd=open	src=/apps/hdmi-technology/b_pulsar_coe/schema/avroschema/Session.avsc	dst=null	perm=null
-2015-02-11 15:00:00,000 INFO FSNamesystem.audit: allowed=true	ugi=hdc_uc4_platform (auth:TOKEN) via sg_adm@xyz.com (auth:TOKEN)	ip=/10.115.11.54	cmd=open	src=/sys/soj/event/2015/02/08/same_day/00000000000772509716119204458864#3632400774990000-949461-r-01459.avro	dst=null	perm=null
-
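-Grok compiles to a Ruby (Oniguruma) regex, so the alternation above can be exercised directly in Ruby. A minimal check (the third sample is hypothetical, illustrating the user/host@realm form the second alternative matches):
-
-        ugi = /ugi=(?<user>[\w\d\-]+)@|ugi=(?<user>[\w\d\-]+)\/[\w\d\-.]+@|ugi=(?<user>[\w\d.\-_]+)[\s(]+/
-        [
-          'ugi=user1@xyz.com (auth:TOKEN)',
-          'ugi=hdc_uc4_platform (auth:TOKEN) via sg_adm@xyz.com (auth:TOKEN)',
-          'ugi=hadoop/host1.xyz.com@xyz.com (auth:KERBEROS)'  # hypothetical sample
-        ].each do |line|
-          m = ugi.match(line)
-          puts "user=#{m && m[:user]}"  # => user1, hdc_uc4_platform, hadoop
-        end
-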
-### Reference Links
-1. [logstash-kafka](https://github.com/joekiller/logstash-kafka)
-2. [logstash](https://github.com/elastic/logstash)
-

http://git-wip-us.apache.org/repos/asf/eagle/blob/8b3729f9/eagle-assembly/src/main/examples/eagle-sandbox-starter.sh
----------------------------------------------------------------------
diff --git a/eagle-assembly/src/main/examples/eagle-sandbox-starter.sh b/eagle-assembly/src/main/examples/eagle-sandbox-starter.sh
deleted file mode 100644
index 286bc87..0000000
--- a/eagle-assembly/src/main/examples/eagle-sandbox-starter.sh
+++ /dev/null
@@ -1,136 +0,0 @@
-#!/bin/bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-source $(dirname $0)/../bin/eagle-env.sh
-eagle_bin=$EAGLE_HOME/bin
-
-
-###################################################################
-#             STEP 1: Check Installation
-###################################################################
-
-echo "STEP [1/3]: checking environment"
-$eagle_bin/eagle-check-env.sh
-[ $? != 0 ] && exit 1
-
-pid_dir=/var/run
-
-# Check HBase if it has been started
-hbase_master_pid=${pid_dir}/hbase/hbase-hbase-master.pid
-hbase_regionserver_pid=${pid_dir}/hbase/hbase-hbase-regionserver.pid
-echo "Checking if hbase is running ..."
-
-if [ -f $hbase_master_pid ] && \
-	ps aux | grep -v grep | grep $(cat $hbase_master_pid) > /dev/null
-then
-	echo "HBase Master is running as process `cat $hbase_master_pid`."
-else
-	echo "Error: HBase Master is not running. Please start it via Ambari."
-	exit 1
-fi
-
-if [ -f $hbase_regionserver_pid ] && \
-	ps aux | grep -v grep | grep $(cat $hbase_regionserver_pid) > /dev/null
-then
-	echo "HBase RegionServer is running as process `cat $hbase_regionserver_pid`."
-else
-	echo "Error: HBase RegionServer is not running. Please start it via Ambari."
-	exit 1
-fi
-
-# Check kafka if it has been started
-kafka_pid=$pid_dir/kafka/kafka.pid
-echo "Checking if kafka is running ..."
-
-if [ -f $kafka_pid ] && ps aux | grep -v grep | grep $(cat $kafka_pid) > /dev/null
-then
-	echo "Kafka is running as process `cat $kafka_pid`."
-else
-	echo "Error: Kafka is not running. Please start it via Ambari."
-	exit 1
-fi
-
-# Check storm if it has been started
-nimbus_pid=$pid_dir/storm/nimbus.pid
-supervisor_pid=$pid_dir/storm/supervisor.pid
-ui_pid=$pid_dir/storm/ui.pid
-echo "Checking if storm is running ..."
-
-if ! ([ -f $nimbus_pid ] && ps aux | grep -v grep | grep $(cat $nimbus_pid) > /dev/null)
-then
-    echo "Error: Storm Nimbus is not running"
-    exit 1
-fi
-
-if ! ([ -f $supervisor_pid ] && ps aux | grep -v grep | grep $(cat $supervisor_pid) > /dev/null)
-then
-    echo "Error: Storm Supervisor is not running"
-    exit 1
-fi
-
-if ! ([ -f $ui_pid ] && ps aux | grep -v grep | grep $(cat $ui_pid) > /dev/null)
-then
-    echo "Error: Storm UI is not running"
-    exit 1
-fi
-
-echo "Storm is running"
-
-
-###################################################################
-#              STEP 2: Starting Eagle Service
-###################################################################
-
-echo "STEP [2/3]: start eagle service"
-$eagle_bin/eagle-service.sh start
-
-
-###################################################################
-#              STEP 3: Starting Eagle Topology
-###################################################################
-
-echo "STEP [3/3]: start eagle topology"
-$eagle_bin/eagle-service-init.sh
-[ $? != 0 ] && exit 1
-
-echo "Creating kafka topics for eagle ... "
-KAFKA_HOME=/usr/hdp/current/kafka-broker
-EAGLE_ZOOKEEPER_QUORUM=localhost:2181
-topic=`${KAFKA_HOME}/bin/kafka-topics.sh --list --zookeeper $EAGLE_ZOOKEEPER_QUORUM --topic sandbox_hdfs_audit_log`
-if [ -z "$topic" ]; then
-	$KAFKA_HOME/bin/kafka-topics.sh --create --zookeeper $EAGLE_ZOOKEEPER_QUORUM --replication-factor 1 --partitions 1 --topic sandbox_hdfs_audit_log
-fi
-
-if [ $? = 0 ]; then
-	echo "==> Created kafka topic successfully for eagle"
-else
-	echo "==> Failed to create kafka topic, exiting"
-	exit 1
-fi
-$eagle_bin/eagle-topology-init.sh
-[ $? != 0 ] && exit 1
-${EAGLE_HOME}/examples/sample-sensitivity-resource-create.sh
-[ $? != 0 ] && exit 1
-${EAGLE_HOME}/examples/sample-policy-create.sh
-[ $? != 0 ] && exit 1
-$eagle_bin/eagle-topology.sh --main org.apache.eagle.security.auditlog.HdfsAuditLogProcessorMain --config ${EAGLE_HOME}/conf/sandbox-hdfsAuditLog-application.conf start
-[ $? != 0 ] && exit 1
-$eagle_bin/eagle-topology.sh --main org.apache.eagle.security.hive.jobrunning.HiveJobRunningMonitoringMain --config ${EAGLE_HOME}/conf/sandbox-hiveQueryLog-application.conf start
-[ $? != 0 ] && exit 1
-$eagle_bin/eagle-topology.sh --main org.apache.eagle.security.userprofile.UserProfileDetectionMain --config ${EAGLE_HOME}/conf/sandbox-userprofile-topology.conf start
-[ $? != 0 ] && exit 1
-

http://git-wip-us.apache.org/repos/asf/eagle/blob/8b3729f9/eagle-assembly/src/main/examples/hadoop-metric-policy-create.sh
----------------------------------------------------------------------
diff --git a/eagle-assembly/src/main/examples/hadoop-metric-policy-create.sh b/eagle-assembly/src/main/examples/hadoop-metric-policy-create.sh
deleted file mode 100644
index 9e9e861..0000000
--- a/eagle-assembly/src/main/examples/hadoop-metric-policy-create.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-source $(dirname $0)/../bin/eagle-env.sh
-
-##### add policies ##########
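-# As an alternative to the curl call below, the same POST can be issued from
-# Ruby's standard library (hypothetical sketch; host, port and credentials are
-# the values sourced from eagle-env.sh, assumed exported):
-#
-#   require 'net/http'; require 'uri'
-#   uri = URI("http://#{ENV['EAGLE_SERVICE_HOST']}:#{ENV['EAGLE_SERVICE_PORT']}" \
-#             "/eagle-service/rest/entities?serviceName=AlertDefinitionService")
-#   req = Net::HTTP::Post.new(uri)
-#   req.basic_auth(ENV['EAGLE_SERVICE_USER'], ENV['EAGLE_SERVICE_PASSWD'])
-#   req['Content-Type'] = 'application/json'
-#   req.body = policy_json  # hypothetical variable: the same JSON array passed to curl via -d
-#   puts Net::HTTP.start(uri.host, uri.port) { |http| http.request(req) }.code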
-echo ""
-echo "Importing policy: safeModePolicy "
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 'Content-Type:application/json' \
- "http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/eagle-service/rest/entities?serviceName=AlertDefinitionService" \
- -d '
- [
-     {
-       "prefix": "alertdef",
-       "tags": {
-         "site": "sandbox",
-         "application": "hadoopJmxMetricDataSource",
-         "policyId": "safeModePolicy",
-         "alertExecutorId": "hadoopJmxMetricAlertExecutor",
-         "policyType": "siddhiCEPEngine"
-       },
-       "description": "jmx metric ",
-       "policyDef": "{\"expression\":\"from hadoopJmxMetricEventStream[component==\\\"namenode\\\" and metric == \\\"hadoop.namenode.fsnamesystemstate.fsstate\\\" and convert(value, \\\"long\\\") > 0]#window.externalTime(timestamp ,10 min) select metric, host, value, timestamp, component, site insert into tmp; \",\"type\":\"siddhiCEPEngine\"}",
-       "enabled": true,
-       "dedupeDef": "{\"alertDedupIntervalMin\":10,\"emailDedupIntervalMin\":10}",
-       "notificationDef": "[{\"sender\":\"eagle@apache.org\",\"recipients\":\"eagle@apache.org\",\"subject\":\"missing block found.\",\"flavor\":\"email\",\"id\":\"email_1\",\"tplFileName\":\"\"}]"
-     }
- ]
- '
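-# Unescaped, the Siddhi expression in policyDef above reads:
-#   from hadoopJmxMetricEventStream[component=="namenode" and metric == "hadoop.namenode.fsnamesystemstate.fsstate"
-#        and convert(value, "long") > 0]#window.externalTime(timestamp ,10 min)
-#   select metric, host, value, timestamp, component, site insert into tmp;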
-
-exit 0
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/eagle/blob/8b3729f9/eagle-assembly/src/main/examples/hadoop-metric-sandbox-starter.sh
----------------------------------------------------------------------
diff --git a/eagle-assembly/src/main/examples/hadoop-metric-sandbox-starter.sh b/eagle-assembly/src/main/examples/hadoop-metric-sandbox-starter.sh
deleted file mode 100644
index 8f227b0..0000000
--- a/eagle-assembly/src/main/examples/hadoop-metric-sandbox-starter.sh
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/bin/bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-source $(dirname $0)/../bin/eagle-env.sh
-eagle_bin=$EAGLE_HOME/bin
-
-
-###################################################################
-#             STEP 1: Check Installation
-###################################################################
-
-echo "STEP [1/3]: checking environment"
-$eagle_bin/eagle-check-env.sh
-[ $? != 0 ] && exit 1
-
-pid_dir=/var/run
-
-# Check HBase if it has been started
-hbase_master_pid=${pid_dir}/hbase/hbase-hbase-master.pid
-hbase_regionserver_pid=${pid_dir}/hbase/hbase-hbase-regionserver.pid
-echo "Checking if hbase is running ..."
-
-if [ -f $hbase_master_pid ] && \
-	ps aux | grep -v grep | grep $(cat $hbase_master_pid) > /dev/null
-then
-	echo "HBase Master is running as process `cat $hbase_master_pid`."
-else
-	echo "Error: HBase Master is not running. Please start it via Ambari."
-	exit 1
-fi
-
-if [ -f $hbase_regionserver_pid ] && \
-	ps aux | grep -v grep | grep $(cat $hbase_regionserver_pid) > /dev/null
-then
-	echo "HBase RegionServer is running as process `cat $hbase_regionserver_pid`."
-else
-	echo "Error: HBase RegionServer is not running. Please start it via Ambari."
-	exit 1
-fi
-
-# Check kafka if it has been started
-kafka_pid=$pid_dir/kafka/kafka.pid
-echo "Checking if kafka is running ..."
-
-if [ -f $kafka_pid ] && ps aux | grep -v grep | grep $(cat $kafka_pid) > /dev/null
-then
-	echo "Kafka is running as process `cat $kafka_pid`."
-else
-	echo "Error: Kafka is not running. Please start it via Ambari."
-	exit 1
-fi
-
-# Check storm if it has been started
-nimbus_pid=$pid_dir/storm/nimbus.pid
-supervisor_pid=$pid_dir/storm/supervisor.pid
-ui_pid=$pid_dir/storm/ui.pid
-echo "Checking if storm is running ..."
-
-if ! ([ -f $nimbus_pid ] && ps aux | grep -v grep | grep $(cat $nimbus_pid) > /dev/null)
-then
-    echo "Error: Storm Nimbus is not running"
-    exit 1
-fi
-
-if ! ([ -f $supervisor_pid ] && ps aux | grep -v grep | grep $(cat $supervisor_pid) > /dev/null)
-then
-    echo "Error: Storm Supervisor is not running"
-    exit 1
-fi
-
-if ! ([ -f $ui_pid ] && ps aux | grep -v grep | grep $(cat $ui_pid) > /dev/null)
-then
-    echo "Error: Storm UI is not running"
-    exit 1
-fi
-
-echo "Storm is running"
-
-
-###################################################################
-#              STEP 2: Starting Eagle Service
-###################################################################
-
-echo "STEP [2/3]: Start Eagle Service"
-$eagle_bin/eagle-service.sh start
-
-
-###################################################################
-#              STEP 3: Starting Eagle Topology
-###################################################################
-
-echo "STEP [3/3]: Init Eagle Service"
-$eagle_bin/eagle-service-init.sh
-[ $? != 0 ] && exit 1
-
-echo "Creating kafka topics for eagle ... "
-KAFKA_HOME=/usr/hdp/current/kafka-broker
-EAGLE_ZOOKEEPER_QUORUM=localhost:2181
-topic=`${KAFKA_HOME}/bin/kafka-topics.sh --list --zookeeper $EAGLE_ZOOKEEPER_QUORUM --topic nn_jmx_metric_sandbox`
-if [ -z "$topic" ]; then
-	$KAFKA_HOME/bin/kafka-topics.sh --create --zookeeper $EAGLE_ZOOKEEPER_QUORUM --replication-factor 1 --partitions 1 --topic nn_jmx_metric_sandbox
-fi
-
-if [ $? = 0 ]; then
-	echo "==> Created kafka topic successfully for Hadoop Metric Monitoring"
-else
-	echo "==> Failed to create kafka topic, exiting"
-	exit 1
-fi
-$eagle_bin/hadoop-metric-monitor.sh
-[ $? != 0 ] && exit 1
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/eagle/blob/8b3729f9/eagle-assembly/src/main/examples/sample-policy-create.sh
----------------------------------------------------------------------
diff --git a/eagle-assembly/src/main/examples/sample-policy-create.sh b/eagle-assembly/src/main/examples/sample-policy-create.sh
deleted file mode 100644
index 71e099e..0000000
--- a/eagle-assembly/src/main/examples/sample-policy-create.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-source $(dirname $0)/../bin/eagle-env.sh
-
-su hdfs -c "hdfs dfs -touchz /tmp/private"
-#su hdfs -c "hdfs dfs -touchz /tmp/sensitive"
-
-#### create hdfs policy sample in sandbox
-echo "create hdfs policy sample in sandbox... "
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 'Content-Type:application/json' "http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/eagle-service/rest/entities?serviceName=AlertDefinitionService" -d \
-'[{"tags":{"site":"sandbox","application":"hdfsAuditLog","alertExecutorId":"hdfsAuditLogAlertExecutor","policyId":"viewPrivate","policyType":"siddhiCEPEngine"},"desc":"view private file","policyDef":"{\"type\":\"siddhiCEPEngine\",\"expression\":\"from hdfsAuditLogEventStream[(cmd=='\'open\'') and (src=='\'/tmp/private\'')] select * insert into outputStream\"}","dedupeDef": "{\"alertDedupIntervalMin\":0,\"emailDedupIntervalMin\":1440}","notificationDef": "[{\"subject\":\"just for test\",\"sender\":\"nobody@test.com\",\"recipients\":\"nobody@test.com\",\"flavor\":\"email\",\"id\":\"email_1\",\"tplFileName\":\"\"}]","remediationDef":"","enabled":true}]'
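-# Unescaped, the Siddhi expression in policyDef above reads:
-#   from hdfsAuditLogEventStream[(cmd=='open') and (src=='/tmp/private')] select * insert into outputStream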
-
-#### create hive policy sample in sandbox
-echo "create hive policy sample in sandbox... "
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 'Content-Type:application/json' "http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/eagle-service/rest/entities?serviceName=AlertDefinitionService" -d \
-'[{"tags":{"site":"sandbox","application":"hiveQueryLog","alertExecutorId":"hiveAccessAlertByRunningJob","policyId":"queryPhoneNumber","policyType":"siddhiCEPEngine"},"desc":"query sensitive hive data","policyDef":"{\"type\":\"siddhiCEPEngine\",\"expression\":\"from hiveAccessLogStream[(sensitivityType=='\'PHONE_NUMBER\'')] select * insert into outputStream;\"}","dedupeDef": "{\"alertDedupIntervalMin\":0,\"emailDedupIntervalMin\":1440}","notificationDef": "[{\"subject\":\"just for test\",\"sender\":\"nobody@test.com\",\"recipients\":\"nobody@test.com\",\"flavor\":\"email\",\"id\":\"email_1\",\"tplFileName\":\"\"}]","remediationDef":"","enabled":"true"}]'

http://git-wip-us.apache.org/repos/asf/eagle/blob/8b3729f9/eagle-assembly/src/main/examples/sample-sensitivity-resource-create.sh
----------------------------------------------------------------------
diff --git a/eagle-assembly/src/main/examples/sample-sensitivity-resource-create.sh b/eagle-assembly/src/main/examples/sample-sensitivity-resource-create.sh
deleted file mode 100644
index 4a8f06f..0000000
--- a/eagle-assembly/src/main/examples/sample-sensitivity-resource-create.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-source $(dirname $0)/../bin/eagle-env.sh
-
-#### create hive sensitivity sample in sandbox
-echo "create hive sensitivity sample in sandbox... "
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 'Content-Type:application/json' "http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/eagle-service/rest/entities?serviceName=HiveResourceSensitivityService" -d '[{"tags":{"site" : "sandbox", "hiveResource":"/xademo/customer_details/phone_number"}, "sensitivityType": "PHONE_NUMBER"}]'
-
-
-#### create hdfs sensitivity sample in sandbox
-echo "create hdfs sensitivity sample in sandbox... "
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 'Content-Type:application/json' "http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/eagle-service/rest/entities?serviceName=FileSensitivityService" -d '[{"tags":{"site" : "sandbox", "filedir":"/tmp/private"}, "sensitivityType": "PRIVATE"}]'
-
-#### create hbase sensitivity sample in sandbox
-echo "create hdfs sensitivity sample in sandbox... "
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 'Content-Type:application/json' "http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/eagle-service/rest/entities?serviceName=HbaseResourceSensitivityService" -d '[{"tags":{"site":"sandbox","hbaseResource":"default:alertStreamSchema"},"sensitivityType":"PrivateTable"}]'
-

http://git-wip-us.apache.org/repos/asf/eagle/blob/8b3729f9/eagle-assembly/src/main/lib/jdbc/eagle-jdbc-mysql.sql
----------------------------------------------------------------------
diff --git a/eagle-assembly/src/main/lib/jdbc/eagle-jdbc-mysql.sql b/eagle-assembly/src/main/lib/jdbc/eagle-jdbc-mysql.sql
deleted file mode 100644
index d4be66c..0000000
--- a/eagle-assembly/src/main/lib/jdbc/eagle-jdbc-mysql.sql
+++ /dev/null
@@ -1,331 +0,0 @@
--- /*
---  * Licensed to the Apache Software Foundation (ASF) under one or more
---  * contributor license agreements.  See the NOTICE file distributed with
---  * this work for additional information regarding copyright ownership.
---  * The ASF licenses this file to You under the Apache License, Version 2.0
---  * (the "License"); you may not use this file except in compliance with
---  * the License.  You may obtain a copy of the License at
---  *
---  *    http://www.apache.org/licenses/LICENSE-2.0
---  *
---  * Unless required by applicable law or agreed to in writing, software
---  * distributed under the License is distributed on an "AS IS" BASIS,
---  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
---  * See the License for the specific language governing permissions and
---  * limitations under the License.
---  *
---  */
-
--- MySQL dump 10.13  Distrib 5.6.23, for osx10.8 (x86_64)
---
--- Host: localhost    Database: eagle
--- ------------------------------------------------------
--- Server version	5.6.23
-
-/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
-/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
-/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
-/*!40101 SET NAMES utf8 */;
-/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
-/*!40103 SET TIME_ZONE='+00:00' */;
-/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
-/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
-/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
-/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-
---
--- Table structure for table `alertdef_alertdef`
---
-
-DROP TABLE IF EXISTS `alertdef_alertdef`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `alertdef_alertdef` (
-  `uuid` varchar(100) COLLATE utf8_bin NOT NULL,
-  `timestamp` bigint(20) DEFAULT NULL,
-  `site` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `application` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `alertexecutorid` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `policyid` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `policytype` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `description` varchar(200) COLLATE utf8_bin DEFAULT NULL,
-  `policydef` varchar(1000) COLLATE utf8_bin DEFAULT NULL,
-  `dedupedef` varchar(1000) COLLATE utf8_bin DEFAULT NULL,
-  `notificationdef` varchar(1000) COLLATE utf8_bin DEFAULT NULL,
-  `remediationdef` varchar(1000) COLLATE utf8_bin DEFAULT NULL,
-  `enabled` tinyint(1) DEFAULT NULL,
-  `owner` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `lastmodifieddate` bigint(20) DEFAULT NULL,
-  `severity` bigint(20) DEFAULT NULL,
-  `createdtime` bigint(20) DEFAULT NULL,
-  `markdownReason` varchar(1000) COLLATE utf8_bin DEFAULT NULL,
-  `markdownEnabled` tinyint(1) DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `uuid_UNIQUE` (`uuid`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `alertdef_alertdef`
---
-
-LOCK TABLES `alertdef_alertdef` WRITE;
-/*!40000 ALTER TABLE `alertdef_alertdef` DISABLE KEYS */;
-/*!40000 ALTER TABLE `alertdef_alertdef` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `alertdetail_hadoop`
---
-
-DROP TABLE IF EXISTS `alertdetail_hadoop`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `alertdetail_hadoop` (
-  `uuid` varchar(100) COLLATE utf8_bin NOT NULL,
-  `timestamp` bigint(20) DEFAULT NULL,
-  `site` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `application` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `hostname` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `policyid` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `alertsource` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `sourcestreams` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `alertexecutorid` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `description` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `remediationid` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `remediationcallback` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `alertcontext` varchar(1000) COLLATE utf8_bin DEFAULT NULL,
-  `streamid` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `uuid_UNIQUE` (`uuid`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `alertdetail_hadoop`
---
-
-LOCK TABLES `alertdetail_hadoop` WRITE;
-/*!40000 ALTER TABLE `alertdetail_hadoop` DISABLE KEYS */;
-/*!40000 ALTER TABLE `alertdetail_hadoop` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `alertexecutor_alertexecutor`
---
-
-DROP TABLE IF EXISTS `alertexecutor_alertexecutor`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `alertexecutor_alertexecutor` (
-  `uuid` varchar(100) COLLATE utf8_bin NOT NULL,
-  `timestamp` bigint(20) DEFAULT NULL,
-  `application` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `alertexecutorid` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `streamname` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `description` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `site` varchar(45) COLLATE utf8_bin DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `uuid_UNIQUE` (`uuid`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `alertexecutor_alertexecutor`
---
-
-LOCK TABLES `alertexecutor_alertexecutor` WRITE;
-/*!40000 ALTER TABLE `alertexecutor_alertexecutor` DISABLE KEYS */;
-INSERT INTO `alertexecutor_alertexecutor` VALUES ('0ijKT3_____62aP_uMZ-K1SsoVDrH3vKa382HVykBVAJItDs',0,'hiveQueryLog','hiveAccessAlertByRunningJob','hiveAccessLogStream','alert executor for hive query log event stream',NULL),('0ijKT3_____62aP_uMZ-K2-GR_rrH3vKDuSvMwA130dvL77HXKQFUNuQa14',0,'userProfile','userProfileAnomalyDetectionExecutor','userActivity','user activity data source','sandbox'),('0ijKT3_____62aP_uMZ-K4uAAAjrH3vKhK_cHVykBVB3iZNS',0,'hdfsAuditLog','hdfsAuditLogAlertExecutor','hdfsAuditLogEventStream','alert executor for hdfs audit log event stream',NULL),('0ijKT3_____62aP_uMZ-K_85ls_rH3vK8F7dJFykBVCMCnLr',0,'hbaseSecurityLog','hbaseSecurityLogAlertExecutor','hbaseSecurityLogEventStream','alert executor for hbase security log event stream',NULL);
-/*!40000 ALTER TABLE `alertexecutor_alertexecutor` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `alertstream_alertstream`
---
-
-DROP TABLE IF EXISTS `alertstream_alertstream`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `alertstream_alertstream` (
-  `uuid` varchar(100) COLLATE utf8_bin NOT NULL,
-  `timestamp` bigint(20) DEFAULT NULL,
-  `application` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `streamname` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `description` varchar(1024) COLLATE utf8_bin DEFAULT NULL,
-  `site` varchar(45) COLLATE utf8_bin DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `uuid_UNIQUE` (`uuid`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `alertstream_alertstream`
---
-
-LOCK TABLES `alertstream_alertstream` WRITE;
-/*!40000 ALTER TABLE `alertstream_alertstream` DISABLE KEYS */;
-INSERT INTO `alertstream_alertstream` VALUES ('x3ZP_H_____62aP_uMZ-K2-GR_oANd9Hby--x1ykBVDbkGte',0,'userProfile','userActivity',NULL,'sandbox');
-/*!40000 ALTER TABLE `alertstream_alertstream` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `alertstreamschema_alertstreamschema`
---
-
-DROP TABLE IF EXISTS `alertstreamschema_alertstreamschema`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `alertstreamschema_alertstreamschema` (
-  `uuid` varchar(100) COLLATE utf8_bin NOT NULL,
-  `timestamp` bigint(20) DEFAULT NULL,
-  `application` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `streamname` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `attrname` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `attrtype` varchar(20) COLLATE utf8_bin DEFAULT NULL,
-  `category` varchar(20) COLLATE utf8_bin DEFAULT NULL,
-  `attrValueResolver` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `usedastag` tinyint(1) DEFAULT NULL,
-  `attrdescription` varchar(1024) COLLATE utf8_bin DEFAULT NULL,
-  `attrdisplayname` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `defaultvalue` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `uuid_UNIQUE` (`uuid`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `alertstreamschema_alertstreamschema`
---
-
-LOCK TABLES `alertstreamschema_alertstreamschema` WRITE;
-/*!40000 ALTER TABLE `alertstreamschema_alertstreamschema` DISABLE KEYS */;
-INSERT INTO `alertstreamschema_alertstreamschema` VALUES ('iSeEvX_____62aP_uMZ-K1SsoVAhAmgc66vEDlykBVAJItDs',0,'hiveQueryLog','hiveAccessLogStream','resource','string','','org.apache.eagle.service.security.hive.resolver.HiveMetadataResolver',NULL,'/database/table/column or /database/table/*',NULL,NULL),('iSeEvX_____62aP_uMZ-K1SsoVAhAmgcA0kpFlykBVAJItDs',0,'hiveQueryLog','hiveAccessLogStream','timestamp','long','','',NULL,'milliseconds of the datetime',NULL,NULL),('iSeEvX_____62aP_uMZ-K1SsoVAhAmgcADbry1ykBVAJItDs',0,'hiveQueryLog','hiveAccessLogStream','user','string','','',NULL,'process user',NULL,NULL),('iSeEvX_____62aP_uMZ-K1SsoVAhAmgcOKXfS1ykBVAJItDs',0,'hiveQueryLog','hiveAccessLogStream','command','string','','org.apache.eagle.service.security.hive.resolver.HiveCommandResolver',NULL,'hive sql command, such as SELECT, INSERT and DELETE',NULL,NULL),('iSeEvX_____62aP_uMZ-K1SsoVAhAmgcX026eVykBVAJItDs',0,'hiveQueryLog','hiveAccessLogStream','sensitivityType','string','','org.apache.eagle.service.security.hive.resolver.HiveSensitivityTypeResolver',NULL,'mark such as PHONE_NUMBER',NULL,NULL),('iSeEvX_____62aP_uMZ-K4uAAAghAmgcA0kpFlykBVB3iZNS',0,'hdfsAuditLog','hdfsAuditLogEventStream','timestamp','long','','',NULL,'milliseconds of the datetime',NULL,NULL),('iSeEvX_____62aP_uMZ-K4uAAAghAmgcAAG95FykBVB3iZNS',0,'hdfsAuditLog','hdfsAuditLogEventStream','src','string','','org.apache.eagle.service.security.hdfs.resolver.HDFSResourceResolver',NULL,'source directory or file, such as /tmp',NULL,NULL),('iSeEvX_____62aP_uMZ-K4uAAAghAmgcAAGBOlykBVB3iZNS',0,'hdfsAuditLog','hdfsAuditLogEventStream','cmd','string','','org.apache.eagle.service.security.hdfs.resolver.HDFSCommandResolver',NULL,'file/directory operation, such as getfileinfo, open, listStatus and so on',NULL,NULL),('iSeEvX_____62aP_uMZ-K4uAAAghAmgcAAGFxVykBVB3iZNS',0,'hdfsAuditLog','hdfsAuditLogEventStream','dst','string','','org.apache.eagle.service.security.hdfs.resolver.HDFSResourceResolver',NULL,'destination directory, such as /tmp',NULL,NULL),('iSeEvX_____62aP_uMZ-K4uAAAghAmgcADD1qFykBVB3iZNS',0,'hdfsAuditLog','hdfsAuditLogEventStream','host','string','','',NULL,'hostname, such as localhost',NULL,NULL),('iSeEvX_____62aP_uMZ-K4uAAAghAmgcADbry1ykBVB3iZNS',0,'hdfsAuditLog','hdfsAuditLogEventStream','user','string','','',NULL,'process user',NULL,NULL),('iSeEvX_____62aP_uMZ-K4uAAAghAmgcMC9vDFykBVB3iZNS',0,'hdfsAuditLog','hdfsAuditLogEventStream','securityZone','string','','',NULL,'',NULL,NULL),('iSeEvX_____62aP_uMZ-K4uAAAghAmgcX026eVykBVB3iZNS',0,'hdfsAuditLog','hdfsAuditLogEventStream','sensitivityType','string','','org.apache.eagle.service.security.hdfs.resolver.HDFSSensitivityTypeResolver',NULL,'mark such as AUDITLOG, SECURITYLOG',NULL,NULL),('iSeEvX_____62aP_uMZ-K4uAAAghAmgcya4BqFykBVB3iZNS',0,'hdfsAuditLog','hdfsAuditLogEventStream','allowed','bool','','',NULL,'true, false or none',NULL,NULL),('iSeEvX_____62aP_uMZ-K_85ls8hAmgcA0kpFlykBVCMCnLr',0,'hbaseSecurityLog','hbaseSecurityLogEventStream','timestamp','long','','',NULL,'milliseconds of the datetime',NULL,NULL),('iSeEvX_____62aP_uMZ-K_85ls8hAmgcADD1qFykBVCMCnLr',0,'hbaseSecurityLog','hbaseSecurityLogEventStream','host','string','','',NULL,'remote ip address to access hbase',NULL,NULL),('iSeEvX_____62aP_uMZ-K_85ls8hAmgcADbry1ykBVCMCnLr',0,'hbaseSecurityLog','hbaseSecurityLogEventStream','user','string','','',NULL,'hbase user',NULL,NULL),('iSeEvX_____62aP_uMZ-K_85ls8hAmgcBoM-VFykBVCMCnLr',0,'hbaseSecurityLog','hbaseSecurityLogEventStream','scope','string','','org.apache.eagle.service.security.hbase.resolver.HbaseMetadataResolver',NULL,'the resources which users are then granted specific permissions (Read, Write, Execute, Create, Admin) against',NULL,NULL),('iSeEvX_____62aP_uMZ-K_85ls8hAmgcQU7yj1ykBVCMCnLr',0,'hbaseSecurityLog','hbaseSecurityLogEventStream','request','string','','org.apache.eagle.service.security.hbase.resolver.HbaseRequestResolver',NULL,'',NULL,NULL),('iSeEvX_____62aP_uMZ-K_85ls8hAmgcX026eVykBVCMCnLr',0,'hbaseSecurityLog','hbaseSecurityLogEventStream','sensitivityType','string','','org.apache.eagle.service.security.hbase.resolver.HbaseSensitivityTypeResolver',NULL,'',NULL,NULL),('iSeEvX_____62aP_uMZ-K_85ls8hAmgcqy9-NlykBVCMCnLr',0,'hbaseSecurityLog','hbaseSecurityLogEventStream','action','string','','org.apache.eagle.service.security.hbase.resolver.HbaseActionResolver',NULL,'action types, such as read, write, create, execute, and admin',NULL,NULL),('iSeEvX_____62aP_uMZ-K_85ls8hAmgcys3P8lykBVCMCnLr',0,'hbaseSecurityLog','hbaseSecurityLogEventStream','status','string','','',NULL,'access status: allowed or denied',NULL,NULL);
-/*!40000 ALTER TABLE `alertstreamschema_alertstreamschema` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `eagleapplicationdesc_eagleapplicationdesc`
---
-
-DROP TABLE IF EXISTS `eagleapplicationdesc_eagleapplicationdesc`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `eagleapplicationdesc_eagleapplicationdesc` (
-  `uuid` varchar(100) COLLATE utf8_bin NOT NULL,
-  `timestamp` bigint(20) DEFAULT NULL,
-  `application` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `description` varchar(1024) COLLATE utf8_bin DEFAULT NULL,
-  `alias` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `groupName` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `features` blob,
-  `config` varchar(1024) COLLATE utf8_bin DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `uuid_UNIQUE` (`uuid`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `eagleapplicationdesc_eagleapplicationdesc`
---
-
-LOCK TABLES `eagleapplicationdesc_eagleapplicationdesc` WRITE;
-/*!40000 ALTER TABLE `eagleapplicationdesc_eagleapplicationdesc` DISABLE KEYS */;
-INSERT INTO `eagleapplicationdesc_eagleapplicationdesc` VALUES ('54TRXX_____62aP_XKQFUAki0Ow',0,'hiveQueryLog','Hive query log security check application','HIVE','DAM','\0\0\0\0\0\0\0\0\0common\0\0\0\0\0\0classification\0\0\0\0\0\0userProfile\0\0\0\0\0\0metadata','{\n	\"view\": {\n		\"prefix\": \"hiveResourceSensitivity\",\n		\"service\": \"HiveResourceSensitivityService\",\n		\"keys\": [\n			\"hiveResource\",\n			\"sensitivityType\"\n		],\n		\"type\": \"table\",\n		\"api\": {\n			\"database\": \"hiveResource/databases\",\n			\"table\": \"hiveResource/tables\",\n			\"column\": \"hiveResource/columns\"\n		},\n		\"mapping\": {\n			\"database\": \"database\",\n			\"table\": \"table\",\n			\"column\": \"column\"\n		}\n	}\n}'),('54TRXX_____62aP_XKQFUHeJk1I',0,'hdfsAuditLog','HDFS audit log security check application','HDFS','DAM','\0\0\0\0\0\0\0\0\0common\0\0\0\0\0\0classification\0\0\0\0\0\0userProfile\0\0\0\0\0\0metadata','{\n	\"view\": {\n		\"prefix\": \"fileSensitivity\",\n		\"service\": \"FileSensitivityService\",\n		\"keys\": [\n			\"filedir\",\n			\"sensitivityType\"\n		],\n		\"type\": \"folder\",\n		\"api\": \"hdfsResource\"\n	}\n}'),('54TRXX_____62aP_XKQFUIwKcus',0,'hbaseSecurityLog','HBASE audit log security check application','HBASE','DAM','\0\0\0\0\0\0\0\0\0common\0\0\0\0\0\0classification\0\0\0\0\0\0userProfile\0\0\0\0\0\0metadata','{\n	\"view\": {\n		\"prefix\": \"hbaseResourceSensitivity\",\n		\"service\": \"HbaseResourceSensitivityService\",\n		\"keys\": [\n			\"hbaseResource\",\n			\"sensitivityType\"\n		],\n		\"type\": \"table\",\n		\"api\": {\n			\"database\": \"hbaseResource/namespaces\",\n			\"table\": \"hbaseResource/tables\",\n			\"column\": \"hbaseResource/columns\"\n		},\n		\"mapping\": {\n			\"database\": \"namespace\",\n			\"table\": \"table\",\n			\"column\": \"columnFamily\"\n		}\n	}\n}');
-/*!40000 ALTER TABLE `eagleapplicationdesc_eagleapplicationdesc` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `eaglefeaturedesc_eaglefeaturedesc`
---
-
-DROP TABLE IF EXISTS `eaglefeaturedesc_eaglefeaturedesc`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `eaglefeaturedesc_eaglefeaturedesc` (
-  `uuid` varchar(100) COLLATE utf8_bin NOT NULL,
-  `timestamp` bigint(20) DEFAULT NULL,
-  `feature` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `description` varchar(1024) COLLATE utf8_bin DEFAULT NULL,
-  `version` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `uuid_UNIQUE` (`uuid`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `eaglefeaturedesc_eaglefeaturedesc`
---
-
-LOCK TABLES `eaglefeaturedesc_eaglefeaturedesc` WRITE;
-/*!40000 ALTER TABLE `eaglefeaturedesc_eaglefeaturedesc` DISABLE KEYS */;
-INSERT INTO `eaglefeaturedesc_eaglefeaturedesc` VALUES ('4DMSA3_____62aP_xaJ69hbKM-Y',0,'classification','Sensitivity browser of the data classification.','v0.3.0'),('4DMSA3_____62aP_xaJ69jj4wMM',0,'metrics','Metrics dashboard','v0.3.0'),('4DMSA3_____62aP_xaJ69q8_Kes',0,'common','Provide the Policy & Alert feature.','v0.3.0'),('4DMSA3_____62aP_xaJ69tuQa14',0,'userProfile','Machine learning of the user profile','v0.3.0'),('4DMSA3_____62aP_xaJ69uUtey8',0,'metadata','Stream metadata viewer','v0.3.0');
-/*!40000 ALTER TABLE `eaglefeaturedesc_eaglefeaturedesc` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `eaglesiteapplication_eaglesiteapplication`
---
-
-DROP TABLE IF EXISTS `eaglesiteapplication_eaglesiteapplication`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `eaglesiteapplication_eaglesiteapplication` (
-  `uuid` varchar(100) COLLATE utf8_bin NOT NULL,
-  `timestamp` bigint(20) DEFAULT NULL,
-  `site` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `application` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `enabled` tinyint(1) DEFAULT NULL,
-  `config` varchar(1024) COLLATE utf8_bin DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `uuid_UNIQUE` (`uuid`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `eaglesiteapplication_eaglesiteapplication`
---
-
-LOCK TABLES `eaglesiteapplication_eaglesiteapplication` WRITE;
-/*!40000 ALTER TABLE `eaglesiteapplication_eaglesiteapplication` DISABLE KEYS */;
-INSERT INTO `eaglesiteapplication_eaglesiteapplication` VALUES ('D-7M5X_____62aP_ADXfR28vvsdcpAVQCSLQ7A',0,'sandbox','hiveQueryLog',1,'{\"accessType\":\"metastoredb_jdbc\",\"password\":\"hive\",\"user\":\"hive\",\"jdbcDriverClassName\":\"com.mysql.jdbc.Driver\",\"jdbcUrl\":\"jdbc:mysql://sandbox.hortonworks.com/hive?createDatabaseIfNotExist=true\"}'),('D-7M5X_____62aP_ADXfR28vvsdcpAVQd4mTUg',0,'sandbox','hdfsAuditLog',1,'{\"fs.defaultFS\":\"hdfs://sandbox.hortonworks.com:8020\"}'),('D-7M5X_____62aP_ADXfR28vvsdcpAVQjApy6w',0,'sandbox','hbaseSecurityLog',1,'{\"hbase.zookeeper.property.clientPort\":\"2181\", \"hbase.zookeeper.quorum\":\"localhost\"}');
-/*!40000 ALTER TABLE `eaglesiteapplication_eaglesiteapplication` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `eaglesitedesc_eaglesitedesc`
---
-
-DROP TABLE IF EXISTS `eaglesitedesc_eaglesitedesc`;
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `eaglesitedesc_eaglesitedesc` (
-  `uuid` varchar(100) COLLATE utf8_bin NOT NULL,
-  `timestamp` bigint(20) DEFAULT NULL,
-  `site` varchar(100) COLLATE utf8_bin DEFAULT NULL,
-  `enabled` tinyint(1) DEFAULT NULL,
-  PRIMARY KEY (`uuid`),
-  UNIQUE KEY `uuid_UNIQUE` (`uuid`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `eaglesitedesc_eaglesitedesc`
---
-
-LOCK TABLES `eaglesitedesc_eaglesitedesc` WRITE;
-/*!40000 ALTER TABLE `eaglesitedesc_eaglesitedesc` DISABLE KEYS */;
-INSERT INTO `eaglesitedesc_eaglesitedesc` VALUES ('phJknH_____62aP_ADXfR28vvsc',0,'sandbox',1);
-/*!40000 ALTER TABLE `eaglesitedesc_eaglesitedesc` ENABLE KEYS */;
-UNLOCK TABLES;
-/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
-
-/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
-/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
-/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
-/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
-/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
-/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
-/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-
--- Dump completed on 2016-03-08 18:34:19

http://git-wip-us.apache.org/repos/asf/eagle/blob/8b3729f9/eagle-assembly/src/main/lib/share/.placeholder
----------------------------------------------------------------------
diff --git a/eagle-assembly/src/main/lib/share/.placeholder b/eagle-assembly/src/main/lib/share/.placeholder
deleted file mode 100644
index 823c394..0000000
--- a/eagle-assembly/src/main/lib/share/.placeholder
+++ /dev/null
@@ -1,14 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements.  See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License.  You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
\ No newline at end of file