Posted to commits@ambari.apache.org by jl...@apache.org on 2017/05/17 20:51:35 UTC

[01/25] ambari git commit: AMBARI-21024. Support rpm/deb build for ambari-infra-manager (oleewere)

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-14714 e90de83c0 -> 922f3080b


AMBARI-21024. Support rpm/deb build for ambari-infra-manager (oleewere)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b97d268a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b97d268a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b97d268a

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: b97d268a1920c833fd8c7e94921a9148f7d2c398
Parents: e54252c
Author: oleewere <ol...@gmail.com>
Authored: Mon May 15 16:43:10 2017 +0200
Committer: oleewere <ol...@gmail.com>
Committed: Tue May 16 11:46:18 2017 +0200

----------------------------------------------------------------------
 ambari-infra/ambari-infra-assembly/pom.xml      | 89 ++++++++++++++++++++
 .../src/main/package/deb/manager/control        | 22 +++++
 .../src/main/package/deb/manager/postinst       | 15 ++++
 .../src/main/package/deb/manager/postrm         | 15 ++++
 .../src/main/package/deb/manager/preinst        | 15 ++++
 .../src/main/package/deb/manager/prerm          | 15 ++++
 ambari-infra/ambari-infra-manager/build.xml     | 53 ++++++++++++
 ambari-infra/ambari-infra-manager/pom.xml       | 43 +++++++++-
 .../org/apache/ambari/infra/InfraManager.java   |  1 -
 .../conf/batch/InfraManagerBatchConfig.java     |  2 +-
 .../src/main/resources/infraManager.sh          | 20 +++++
 11 files changed, 287 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
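For context: these rpm/deb artifacts are produced by the ambari-infra-assembly module during the Maven package phase, inside the build profiles shown in the pom.xml diff below. A minimal build sketch (the -Prpm profile name is an assumption; use whatever profile ids the <profiles> section actually declares):

    # hypothetical invocation - build ambari-infra and package the manager as an rpm
    cd ambari-infra
    mvn clean package -Prpm -DskipTests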


http://git-wip-us.apache.org/repos/asf/ambari/blob/b97d268a/ambari-infra/ambari-infra-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-assembly/pom.xml b/ambari-infra/ambari-infra-assembly/pom.xml
index 51e5804..550d97c 100644
--- a/ambari-infra/ambari-infra-assembly/pom.xml
+++ b/ambari-infra/ambari-infra-assembly/pom.xml
@@ -38,6 +38,10 @@
     <solr.client.mapping.path>${mapping.base.path}/${solr.client.package.name}</solr.client.mapping.path>
     <solr.client.dir>${project.basedir}/../ambari-infra-solr-client</solr.client.dir>
     <infra.solr.plugin.dir>${project.basedir}/../ambari-infra-solr-plugin</infra.solr.plugin.dir>
+    <infra-manager.package.name>ambari-infra-manager</infra-manager.package.name>
+    <infra-manager.dir>${project.basedir}/../ambari-infra-manager</infra-manager.dir>
+    <infra-manager.mapping.path>${mapping.base.path}/${infra-manager.package.name}</infra-manager.mapping.path>
+    <infra-manager.conf.mapping.path>/etc/${infra-manager.package.name}/conf</infra-manager.conf.mapping.path>
   </properties>
 
   <profiles>
@@ -118,6 +122,43 @@
                   </mappings>
                 </configuration>
               </execution>
+              <execution>
+                <id>infra-manager</id>
+                <phase>package</phase>
+                <goals>
+                  <goal>rpm</goal>
+                </goals>
+                <configuration>
+                  <group>Development</group>
+                  <name>${infra-manager.package.name}</name>
+                  <mappings>
+                    <mapping>
+                      <directory>${infra-manager.mapping.path}</directory>
+                      <sources>
+                        <source>
+                          <location>${infra-manager.dir}/target/package</location>
+                          <excludes>
+                            <exclude>log4j.xml</exclude>
+                            <exclude>infra-manager.properties</exclude>
+                          </excludes>
+                        </source>
+                      </sources>
+                    </mapping>
+                    <mapping>
+                      <directory>${infra-manager.conf.mapping.path}</directory>
+                      <sources>
+                        <source>
+                          <location>${infra-manager.dir}/target/package</location>
+                          <includes>
+                            <include>log4j.xml</include>
+                            <include>infra-manager.properties</include>
+                          </includes>
+                        </source>
+                      </sources>
+                    </mapping>
+                  </mappings>
+                </configuration>
+              </execution>
             </executions>
           </plugin>
           <plugin>
@@ -277,6 +318,49 @@
                   </dataSet>
                 </configuration>
               </execution>
+
+              <execution>
+                <phase>package</phase>
+                <id>jdeb-infra-manager</id>
+                <goals>
+                  <goal>jdeb</goal>
+                </goals>
+                <configuration>
+                  <controlDir>${basedir}/src/main/package/deb/manager</controlDir>
+                  <deb>${basedir}/target/${infra-manager.package.name}_${package-version}-${package-release}.deb</deb>
+                  <skip>false</skip>
+                  <skipPOMs>false</skipPOMs>
+                  <dataSet>
+                    <data>
+                      <src>${infra-manager.dir}/target/ambari-infra-manager.tar.gz</src>
+                      <type>archive</type>
+                      <mapper>
+                        <type>perm</type>
+                        <user>root</user>
+                        <group>root</group>
+                        <prefix>${infra-manager.mapping.path}</prefix>
+                      </mapper>
+                      <excludes>
+                        log4j.xml,infra-manager.properties
+                      </excludes>
+                    </data>
+                    <data>
+                      <src>${infra-manager.dir}/target/package</src>
+                      <type>directory</type>
+                      <mapper>
+                        <prefix>${infra-manager.conf.mapping.path}</prefix>
+                        <type>perm</type>
+                        <user>root</user>
+                        <group>root</group>
+                        <filemode>644</filemode>
+                      </mapper>
+                      <includes>
+                        log4j.xml,infra-manager.properties
+                      </includes>
+                    </data>
+                  </dataSet>
+                </configuration>
+              </execution>
             </executions>
           </plugin>
           <plugin>
@@ -330,6 +414,11 @@
       <artifactId>ambari-infra-solr-plugin</artifactId>
       <version>${project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.ambari</groupId>
+      <artifactId>ambari-infra-manager</artifactId>
+      <version>${project.version}</version>
+    </dependency>
   </dependencies>
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b97d268a/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/control
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/control b/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/control
new file mode 100644
index 0000000..03663a0
--- /dev/null
+++ b/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/control
@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+Package: [[infra-manager.package.name]]
+Version: [[package-version]]-[[package-release]]
+Section: [[deb.section]]
+Priority: [[deb.priority]]
+Depends: [[deb.dependency.list]]
+Architecture: [[deb.architecture]]
+Description: [[description]]
+Maintainer: [[deb.publisher]]
\ No newline at end of file
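The [[...]] tokens above are jdeb filter placeholders, substituted from Maven properties when the package is assembled. A quick way to check the resolved metadata and payload of the built package (the filename follows the <deb> pattern in the assembly pom; dpkg-deb -I prints the control info, -c lists the contents):

    dpkg-deb -I target/ambari-infra-manager_*.deb
    dpkg-deb -c target/ambari-infra-manager_*.deb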

http://git-wip-us.apache.org/repos/asf/ambari/blob/b97d268a/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/postinst
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/postinst b/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/postinst
new file mode 100644
index 0000000..21a01fa
--- /dev/null
+++ b/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/postinst
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License

http://git-wip-us.apache.org/repos/asf/ambari/blob/b97d268a/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/postrm
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/postrm b/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/postrm
new file mode 100644
index 0000000..21a01fa
--- /dev/null
+++ b/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/postrm
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License

http://git-wip-us.apache.org/repos/asf/ambari/blob/b97d268a/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/preinst
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/preinst b/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/preinst
new file mode 100644
index 0000000..21a01fa
--- /dev/null
+++ b/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/preinst
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License

http://git-wip-us.apache.org/repos/asf/ambari/blob/b97d268a/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/prerm
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/prerm b/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/prerm
new file mode 100644
index 0000000..21a01fa
--- /dev/null
+++ b/ambari-infra/ambari-infra-assembly/src/main/package/deb/manager/prerm
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License

http://git-wip-us.apache.org/repos/asf/ambari/blob/b97d268a/ambari-infra/ambari-infra-manager/build.xml
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/build.xml b/ambari-infra/ambari-infra-manager/build.xml
new file mode 100644
index 0000000..c7954d9
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/build.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project basedir="." default="build" name="infra-manager">
+  <property environment="env"/>
+  <property name="debuglevel" value="source,lines,vars"/>
+  <dirname property="builddir" file="build.xml"/>
+  <property name="target" value="1.7"/>
+  <property name="source" value="1.7"/>
+  <target name="init">
+  </target>
+  <target name="build"/>
+
+  <target name="package">
+    <delete dir="target/package"/>
+    <copy todir="target/package/libs" includeEmptyDirs="no">
+      <fileset dir="target/libs"/>
+    </copy>
+    <copy todir="target/package/libs" includeEmptyDirs="no">
+      <fileset file="target/*.jar"/>
+    </copy>
+    <copy todir="target/package" includeEmptyDirs="no">
+      <fileset file="src/main/resources/infraManager.sh"/>
+      <fileset file="target/classes/infra-manager.properties"/>
+      <fileset file="target/classes/log4j.xml"/>
+    </copy>
+    <chmod file="target/package/*.sh" perm="755"/>
+    <tar compression="gzip" destfile="target/ambari-infra-manager.tar.gz">
+      <tarfileset mode="755" dir="target/package">
+        <include name="*.sh"/>
+      </tarfileset>
+      <tarfileset mode="664" dir="target/package">
+        <exclude name="*.sh"/>
+      </tarfileset>
+    </tar>
+
+  </target>
+
+</project>
\ No newline at end of file
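A sketch for sanity-checking the tarball this package target produces, assuming mvn package has already run so target/libs and the jar exist (the maven-antrun-plugin execution added to the manager pom.xml below wires this target into the package phase):

    # *.sh entries should be listed with mode 755, everything else 664
    tar -tvzf target/ambari-infra-manager.tar.gz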

http://git-wip-us.apache.org/repos/asf/ambari/blob/b97d268a/ambari-infra/ambari-infra-manager/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/pom.xml b/ambari-infra/ambari-infra-manager/pom.xml
index bd5bf03..b7708c2 100644
--- a/ambari-infra/ambari-infra-manager/pom.xml
+++ b/ambari-infra/ambari-infra-manager/pom.xml
@@ -43,7 +43,7 @@
   </properties>
 
   <build>
-    <finalName>ambari-infra-manager</finalName>
+    <finalName>ambari-infra-manager_${project.version}</finalName>
     <plugins>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
@@ -69,6 +69,47 @@
           <mainClass>org.apache.ambari.infra.InfraManager</mainClass>
         </configuration>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <version>2.8</version>
+        <executions>
+          <execution>
+            <id>copy-dependencies</id>
+            <phase>package</phase>
+            <goals>
+              <goal>copy-dependencies</goal>
+            </goals>
+            <configuration>
+              <outputAbsoluteArtifactFilename>true</outputAbsoluteArtifactFilename>
+              <outputDirectory>${basedir}/target/libs</outputDirectory>
+              <overWriteReleases>false</overWriteReleases>
+              <overWriteSnapshots>false</overWriteSnapshots>
+              <overWriteIfNewer>true</overWriteIfNewer>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <version>1.7</version>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <configuration>
+              <target>
+                <ant antfile="build.xml">
+                  <target name="package"/>
+                </ant>
+              </target>
+            </configuration>
+            <goals>
+              <goal>run</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
     </plugins>
   </build>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b97d268a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/InfraManager.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/InfraManager.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/InfraManager.java
index 227bab4..656127e 100644
--- a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/InfraManager.java
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/InfraManager.java
@@ -142,7 +142,6 @@ public class InfraManager {
     ServletHolder jerseyServlet = context.addServlet(org.glassfish.jersey.servlet.ServletContainer.class, "/api/v1/*");
     jerseyServlet.setInitOrder(1);
     jerseyServlet.setInitParameter("jersey.config.server.provider.packages","org.apache.ambari.infra.rest,io.swagger.jaxrs.listing");
-
     context.getSessionHandler().getSessionManager().setMaxInactiveInterval(SESSION_TIMEOUT);
     context.getSessionHandler().getSessionManager().getSessionCookieConfig().setName(INFRA_MANAGER_SESSION_ID);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b97d268a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/batch/InfraManagerBatchConfig.java
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/batch/InfraManagerBatchConfig.java b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/batch/InfraManagerBatchConfig.java
index a587ec2..7310626 100644
--- a/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/batch/InfraManagerBatchConfig.java
+++ b/ambari-infra/ambari-infra-manager/src/main/java/org/apache/ambari/infra/conf/batch/InfraManagerBatchConfig.java
@@ -104,7 +104,7 @@ public class InfraManagerBatchConfig {
     dataSource.setDriverClassName("org.sqlite.JDBC");
     dataSource.setUrl("jdbc:sqlite:" + sqliteDbFileLocation);
     dataSource.setUsername(databaseUsername);
-    dataSource.setUsername(databasePassword);
+    dataSource.setPassword(databasePassword);
     return dataSource;
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b97d268a/ambari-infra/ambari-infra-manager/src/main/resources/infraManager.sh
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/resources/infraManager.sh b/ambari-infra/ambari-infra-manager/src/main/resources/infraManager.sh
new file mode 100644
index 0000000..9f40d5c
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/resources/infraManager.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JVM="java"
+sdir="`dirname \"$0\"`"
+
+PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "/etc/ambari-infra-manager/conf:$sdir:$sdir/libs/*" org.apache.ambari.infra.InfraManager ${1+"$@"}
\ No newline at end of file
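A hypothetical manual launch of the installed package; the install prefix depends on mapping.base.path in the assembly pom (assumed /usr/lib here), and the script reads its config from /etc/ambari-infra-manager/conf per the classpath above:

    # paths are assumptions - adjust to the actual mapping.base.path
    cd /usr/lib/ambari-infra-manager
    JAVA_HOME=/usr/java/default ./infraManager.sh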


[02/25] ambari git commit: AMBARI-21020. Support external story locations in Log Search IT framework (oleewere)

Posted by jl...@apache.org.
AMBARI-21020. Support external story locations in Log Search IT framework (oleewere)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e54252c8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e54252c8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e54252c8

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: e54252c829f674919d51d38c33689cce7a17e075
Parents: 38cc334
Author: oleewere <ol...@gmail.com>
Authored: Mon May 15 12:12:36 2017 +0200
Committer: oleewere <ol...@gmail.com>
Committed: Tue May 16 11:46:18 2017 +0200

----------------------------------------------------------------------
 ambari-logsearch/README.md                      |  1 +
 ambari-logsearch/ambari-logsearch-it/pom.xml    |  6 ++
 .../story/LogSearchBackendStories.java          | 19 +---
 .../logsearch/story/LogSearchStoryLocator.java  | 97 ++++++++++++++++++++
 .../logsearch/story/LogSearchUIStories.java     | 10 +-
 5 files changed, 114 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e54252c8/ambari-logsearch/README.md
----------------------------------------------------------------------
diff --git a/ambari-logsearch/README.md b/ambari-logsearch/README.md
index 92b98f9..d05f45a 100644
--- a/ambari-logsearch/README.md
+++ b/ambari-logsearch/README.md
@@ -45,5 +45,6 @@ mvn clean integration-test -Dbackend-tests failsafe:verify
 xquartz
 # then in an another window you can start ui tests
 mvn clean integration-test -Dselenium-tests failsafe:verify
+# you can specify the story file folder location with -Dbackend.stories.location and -Dui.stories.location (absolute file paths) in the commands above
 ```
 You can also run the tests from the IDE, but make sure all of the ambari logsearch modules are built first.
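A concrete invocation combining the commands above with the new property (the path is a placeholder):

    # run backend stories from an external folder instead of the classpath
    mvn clean integration-test -Dbackend-tests -Dbackend.stories.location=/path/to/stories failsafe:verify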

http://git-wip-us.apache.org/repos/asf/ambari/blob/e54252c8/ambari-logsearch/ambari-logsearch-it/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-it/pom.xml b/ambari-logsearch/ambari-logsearch-it/pom.xml
index 0058c80..81af9e8 100644
--- a/ambari-logsearch/ambari-logsearch-it/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-it/pom.xml
@@ -39,6 +39,8 @@
     <failsafe-plugin.version>2.20</failsafe-plugin.version>
     <forkCount>1</forkCount>
     <docker.host>localhost</docker.host>
+    <backend.stories.location>NONE</backend.stories.location>
+    <ui.stories.location>NONE</ui.stories.location>
   </properties>
 
   <dependencies>
@@ -162,6 +164,7 @@
                   <systemPropertyVariables>
                     <log4j.configuration>file:${project.build.testOutputDirectory}/log4j.properties</log4j.configuration>
                     <docker.host>${docker.host}</docker.host>
+                    <ui.stories.location>${ui.stories.location}</ui.stories.location>
                   </systemPropertyVariables>
                 </configuration>
               </execution>
@@ -204,6 +207,7 @@
                   <systemPropertyVariables>
                     <log4j.configuration>file:${project.build.testOutputDirectory}/log4j.properties</log4j.configuration>
                     <docker.host>${docker.host}</docker.host>
+                    <backend.stories.location>${backend.stories.location}</backend.stories.location>
                   </systemPropertyVariables>
                 </configuration>
               </execution>
@@ -246,6 +250,8 @@
                   <systemPropertyVariables>
                     <log4j.configuration>file:${project.build.testOutputDirectory}/log4j.properties</log4j.configuration>
                     <docker.host>${docker.host}</docker.host>
+                    <backend.stories.location>${backend.stories.location}</backend.stories.location>
+                    <ui.stories.location>${ui.stories.location}</ui.stories.location>
                   </systemPropertyVariables>
                 </configuration>
               </execution>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e54252c8/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchBackendStories.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchBackendStories.java b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchBackendStories.java
index 46f2928..fa7a527 100644
--- a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchBackendStories.java
+++ b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchBackendStories.java
@@ -18,7 +18,6 @@
  */
 package org.apache.ambari.logsearch.story;
 
-import com.google.common.base.Function;
 import com.google.common.base.Predicate;
 import com.google.common.collect.Collections2;
 import com.google.common.collect.Lists;
@@ -27,31 +26,24 @@ import org.apache.ambari.logsearch.steps.SolrSteps;
 import org.apache.ambari.logsearch.steps.LogSearchDockerSteps;
 import org.jbehave.core.configuration.Configuration;
 import org.jbehave.core.configuration.MostUsefulConfiguration;
-import org.jbehave.core.embedder.executors.SameThreadExecutors;
-import org.jbehave.core.io.LoadFromClasspath;
-import org.jbehave.core.io.StoryFinder;
-import org.jbehave.core.io.StoryPathResolver;
-import org.jbehave.core.io.UnderscoredCamelCaseResolver;
 import org.jbehave.core.junit.JUnitStories;
-import org.jbehave.core.junit.JUnitStory;
 import org.jbehave.core.reporters.Format;
 import org.jbehave.core.reporters.StoryReporterBuilder;
 import org.jbehave.core.steps.InjectableStepsFactory;
 import org.jbehave.core.steps.InstanceStepsFactory;
 import org.junit.Test;
 
-import javax.annotation.Nullable;
-import java.util.Arrays;
 import java.util.List;
 
-import static org.jbehave.core.io.CodeLocations.codeLocationFromClass;
-
 public class LogSearchBackendStories extends JUnitStories {
 
+  private static final String BACKEND_STORIES_LOCATION_PROPERTY = "backend.stories.location";
+  private static final String STORY_SUFFIX = ".story";
+
   @Override
   public Configuration configuration() {
     return new MostUsefulConfiguration()
-      .useStoryLoader(new LoadFromClasspath(this.getClass()))
+      .useStoryLoader(LogSearchStoryLocator.getStoryLoader(BACKEND_STORIES_LOCATION_PROPERTY, this.getClass()))
       .useStoryReporterBuilder(
         new StoryReporterBuilder().withFailureTrace(true).withDefaultFormats().withFormats(Format.CONSOLE, Format.TXT));
   }
@@ -71,8 +63,7 @@ public class LogSearchBackendStories extends JUnitStories {
 
   @Override
   protected List<String> storyPaths() {
-    List<String> backendStories = new StoryFinder()
-      .findPaths(codeLocationFromClass(this.getClass()).getFile(), Arrays.asList("**/*.story"), null);
+    List<String> backendStories = LogSearchStoryLocator.findStories(BACKEND_STORIES_LOCATION_PROPERTY, STORY_SUFFIX, this.getClass());
     return Lists.newArrayList(Collections2.filter(backendStories, new Predicate<String>() {
       @Override
       public boolean apply(String storyFileName) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e54252c8/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchStoryLocator.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchStoryLocator.java b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchStoryLocator.java
new file mode 100644
index 0000000..bed7999
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchStoryLocator.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.story;
+
+import com.google.common.collect.Lists;
+import org.apache.commons.lang.StringUtils;
+import org.jbehave.core.io.LoadFromClasspath;
+import org.jbehave.core.io.LoadFromRelativeFile;
+import org.jbehave.core.io.StoryFinder;
+import org.jbehave.core.io.StoryLoader;
+
+import java.io.File;
+import java.net.URL;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.jbehave.core.io.CodeLocations.codeLocationFromClass;
+
+/**
+ * Helper class for loading story files from the classpath or externally - based on system properties
+ */
+public class LogSearchStoryLocator {
+
+  private LogSearchStoryLocator() {
+  }
+
+  /**
+   * Get the proper story loader based on the story location property (if empty or NONE, stories are loaded from the classpath)
+   * @param property Story location property (absolute path - folder)
+   * @param clazz Class of the *Stories object
+   */
+  public static StoryLoader getStoryLoader(String property, Class clazz) {
+    boolean useExternalStoryLocation = useExternalStoryLocation(property);
+    if (useExternalStoryLocation) {
+      try {
+        return new LoadFromRelativeFile(new URL("file://" + System.getProperty(property)));
+      } catch (Exception e) {
+        throw new RuntimeException("Cannot load story files from url: file://" + System.getProperty(property));
+      }
+    } else {
+      return new LoadFromClasspath(clazz);
+    }
+  }
+
+
+  /**
+   * Find stories based on the story location property; if the property is not set or is NONE, the story files are loaded from the classpath
+   * @param property Story location property (absolute path - folder)
+   * @param suffix Story suffix for specific stories - e.g. .ui.story
+   * @param clazz Class of the *Stories object
+   */
+  public static List<String> findStories(String property, String suffix, Class clazz) {
+    List<String> stories = null;
+    if (useExternalStoryLocation(property)) {
+      stories = findStoriesInFolder(System.getProperty(property), suffix);
+    } else {
+      stories = new StoryFinder()
+        .findPaths(codeLocationFromClass(clazz).getFile(), Arrays.asList(String.format("**/*%s", suffix)), null);
+    }
+    return stories;
+  }
+
+  private static List<String> findStoriesInFolder(String folderAbsolutePath, String suffix) {
+    List<String> results = Lists.newArrayList();
+    File folder = new File(folderAbsolutePath);
+    File[] listOfFiles = folder.listFiles();
+    if (listOfFiles != null) {
+      for (File file : listOfFiles) {
+        if (file.getName().endsWith(suffix)) {
+          results.add(file.getName());
+        }
+      }
+    }
+    return results;
+  }
+
+  private static boolean useExternalStoryLocation(String property) {
+    String storyLocationProp = System.getProperty(property);
+    return StringUtils.isNotEmpty(storyLocationProp) && !"NONE".equals(storyLocationProp);
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/e54252c8/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchUIStories.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchUIStories.java b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchUIStories.java
index 217c50f..5417ab1 100644
--- a/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchUIStories.java
+++ b/ambari-logsearch/ambari-logsearch-it/src/test/java/org/apache/ambari/logsearch/story/LogSearchUIStories.java
@@ -24,8 +24,6 @@ import org.apache.ambari.logsearch.steps.LogSearchUISteps;
 import org.jbehave.core.configuration.Configuration;
 import org.jbehave.core.Embeddable;
 import org.jbehave.core.embedder.executors.SameThreadExecutors;
-import org.jbehave.core.io.LoadFromClasspath;
-import org.jbehave.core.io.StoryFinder;
 import org.jbehave.core.junit.JUnitStories;
 import org.jbehave.core.reporters.StoryReporterBuilder;
 import org.jbehave.core.steps.InjectableStepsFactory;
@@ -52,6 +50,9 @@ public class LogSearchUIStories extends JUnitStories {
   private WebDriverProvider driverProvider;
   private SeleniumContext context;
 
+  private static final String UI_STORIES_LOCATION_PROPERTY = "ui.stories.location";
+  private static final String STORY_SUFFIX = ".ui.story";
+
   public LogSearchUIStories() {
     String dockerHost = System.getProperty("docker.host") != null ? System.getProperty("docker.host") : "localhost";
     System.setProperty("REMOTE_WEBDRIVER_URL", String.format("http://%s:4444/wd/hub", dockerHost));
@@ -70,7 +71,7 @@ public class LogSearchUIStories extends JUnitStories {
     return new SeleniumConfiguration()
       .useSeleniumContext(context)
       .useWebDriverProvider(driverProvider)
-      .useStoryLoader(new LoadFromClasspath(embeddableClass))
+      .useStoryLoader(LogSearchStoryLocator.getStoryLoader(UI_STORIES_LOCATION_PROPERTY, this.getClass()))
       .useStoryReporterBuilder(new StoryReporterBuilder()
         .withCodeLocation(codeLocationFromClass(embeddableClass))
         .withDefaultFormats()
@@ -86,7 +87,6 @@ public class LogSearchUIStories extends JUnitStories {
 
   @Override
   protected List<String> storyPaths() {
-    return new StoryFinder()
-      .findPaths(codeLocationFromClass(this.getClass()).getFile(), Arrays.asList("**/*.ui.story"), null);
+    return LogSearchStoryLocator.findStories(UI_STORIES_LOCATION_PROPERTY, STORY_SUFFIX, this.getClass());
   }
 }
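The UI runner follows the same pattern; a sketch with a placeholder path (when the property is unset or NONE, stories load from the classpath as before):

    mvn clean integration-test -Dselenium-tests -Dui.stories.location=/path/to/ui-stories failsafe:verify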


[19/25] ambari git commit: AMBARI-20758 Aggregate local metrics for minute aggregation time window (dsen)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2 b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
index b876a3d..28944ca 100644
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
@@ -23,6 +23,8 @@ port={{metric_collector_port}}
 collectionFrequency={{metrics_collection_period}}000
 maxRowCacheSize=10000
 sendInterval={{metrics_report_interval}}000
+host_in_memory_aggregation = {{host_in_memory_aggregation}}
+host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 instanceId={{cluster_name}}
 set.instanceId={{set_instanceId}}
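A quick sanity check that the host-level aggregator these sinks now point at is actually listening, assuming the default port 61888 used throughout these params files:

    # port is the assumed default from timeline.metrics.host.inmemory.aggregation.port
    ss -ltn | grep 61888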

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index efea167..d45aea6 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -184,6 +184,9 @@ if has_metric_collector:
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+
 # if hbase is selected the hbase_rs_hosts, should not be empty, but still default just in case
 if 'slave_hosts' in config['clusterHostInfo']:
   rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/slave_hosts') #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
index 24535c5..c8f2f13 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
@@ -78,6 +78,8 @@ hbase.sink.timeline.protocol={{metric_collector_protocol}}
 hbase.sink.timeline.port={{metric_collector_port}}
 hbase.sink.timeline.instanceId={{cluster_name}}
 hbase.sink.timeline.set.instanceId={{set_instanceId}}
+hbase.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+hbase.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 hbase.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
index 9076269..f4e25e1 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
@@ -76,6 +76,8 @@ hbase.sink.timeline.protocol={{metric_collector_protocol}}
 hbase.sink.timeline.port={{metric_collector_port}}
 hbase.sink.timeline.instanceId={{cluster_name}}
 hbase.sink.timeline.set.instanceId={{set_instanceId}}
+hbase.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+hbase.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 hbase.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
index fae61d3..4b03880 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-metrics2.properties.xml
@@ -88,6 +88,8 @@ resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
 *.sink.timeline.port={{metric_collector_port}}
 *.sink.timeline.instanceId={{cluster_name}}
 *.sink.timeline.set.instanceId={{set_instanceId}}
+*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 *.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index d854451..c1128a5 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -566,6 +566,8 @@ if has_metric_collector:
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
 ########################################################
 ############# Atlas related params #####################
 ########################################################

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2 b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
index 9328f9f..d78a342 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
@@ -53,6 +53,8 @@
   hivemetastore.sink.timeline.collector.hosts={{ams_collector_hosts}}
   hivemetastore.sink.timeline.port={{metric_collector_port}}
   hivemetastore.sink.timeline.protocol={{metric_collector_protocol}}
+  hivemetastore.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  hivemetastore.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 
 {% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2 b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
index 9a7f9dc..1f496ef 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
@@ -53,5 +53,7 @@
   hiveserver2.sink.timeline.collector.hosts={{ams_collector_hosts}}
   hiveserver2.sink.timeline.port={{metric_collector_port}}
   hiveserver2.sink.timeline.protocol={{metric_collector_protocol}}
+  hiveserver2.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  hiveserver2.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 {% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llapdaemon.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llapdaemon.j2 b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llapdaemon.j2
index e9fe024..01869c0 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llapdaemon.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llapdaemon.j2
@@ -52,5 +52,7 @@
   llapdaemon.sink.timeline.collector.hosts={{ams_collector_hosts}}
   llapdaemon.sink.timeline.port={{metric_collector_port}}
   llapdaemon.sink.timeline.protocol={{metric_collector_protocol}}
+  llapdaemon.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  llapdaemon.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 {% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2 b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
index bd7eb0c..2e25c4a 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
@@ -52,5 +52,7 @@
   llaptaskscheduler.sink.timeline.collector.hosts={{ams_collector_hosts}}
   llaptaskscheduler.sink.timeline.port={{metric_collector_port}}
   llaptaskscheduler.sink.timeline.protocol={{metric_collector_protocol}}
+  llaptaskscheduler.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  llaptaskscheduler.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 {% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/params_linux.py
index c0ac535..a12d388 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/scripts/params_linux.py
@@ -565,6 +565,9 @@ if has_metric_collector:
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+
 ########################################################
 ############# Atlas related params #####################
 ########################################################

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2 b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
index 9328f9f..d78a342 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hivemetastore.properties.j2
@@ -53,6 +53,8 @@
   hivemetastore.sink.timeline.collector.hosts={{ams_collector_hosts}}
   hivemetastore.sink.timeline.port={{metric_collector_port}}
   hivemetastore.sink.timeline.protocol={{metric_collector_protocol}}
+  hivemetastore.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  hivemetastore.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 
 {% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2 b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
index 9a7f9dc..1f496ef 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-hiveserver2.properties.j2
@@ -53,5 +53,7 @@
   hiveserver2.sink.timeline.collector.hosts={{ams_collector_hosts}}
   hiveserver2.sink.timeline.port={{metric_collector_port}}
   hiveserver2.sink.timeline.protocol={{metric_collector_protocol}}
+  hiveserver2.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  hiveserver2.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 {% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llapdaemon.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llapdaemon.j2 b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llapdaemon.j2
index e9fe024..01869c0 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llapdaemon.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llapdaemon.j2
@@ -52,5 +52,7 @@
   llapdaemon.sink.timeline.collector.hosts={{ams_collector_hosts}}
   llapdaemon.sink.timeline.port={{metric_collector_port}}
   llapdaemon.sink.timeline.protocol={{metric_collector_protocol}}
+  llapdaemon.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  llapdaemon.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 {% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2 b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
index bd7eb0c..2e25c4a 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/package/templates/hadoop-metrics2-llaptaskscheduler.j2
@@ -52,5 +52,7 @@
   llaptaskscheduler.sink.timeline.collector.hosts={{ams_collector_hosts}}
   llaptaskscheduler.sink.timeline.port={{metric_collector_port}}
   llaptaskscheduler.sink.timeline.protocol={{metric_collector_protocol}}
+  llaptaskscheduler.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+  llaptaskscheduler.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 {% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
index e01dacd..26e7a77 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/configuration/kafka-broker.xml
@@ -422,4 +422,15 @@
     <description>Timeline metrics reporter send interval</description>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>kafka.timeline.metrics.host_in_memory_aggregation</name>
+    <value>{{host_in_memory_aggregation}}</value>
+    <description>If set to "true", host metrics will be aggregated in memory on each host.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.host_in_memory_aggregation_port</name>
+    <value>{{host_in_memory_aggregation_port}}</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
index 5b0be54..9acc1ef 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
@@ -156,6 +156,9 @@ if has_metric_collector:
     metric_collector_protocol = 'https'
   else:
     metric_collector_protocol = 'http'
+
+  host_in_memory_aggregation = str(default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)).lower()
+  host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
   pass
 
 # Security-related params

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
index d9fae76..78ec165 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
@@ -208,6 +208,8 @@ metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sin
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 metric_collector_sink_jar = "/usr/lib/storm/lib/ambari-metrics-storm-sink-with-common-*.jar"
 metric_collector_legacy_sink_jar = "/usr/lib/storm/lib/ambari-metrics-storm-sink-legacy-with-common-*.jar"
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
 
 
 # Cluster Zookeeper quorum

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/config.yaml.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/config.yaml.j2 b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/config.yaml.j2
index 51162e8..67b89c4 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/config.yaml.j2
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/config.yaml.j2
@@ -61,6 +61,8 @@ metrics_collector:
   protocol: "{{metric_collector_protocol}}"
   port: "{{metric_collector_port}}"
   appId: "{{metric_collector_app_id}}"
+  host_in_memory_aggregation: "{{host_in_memory_aggregation}}"
+  host_in_memory_aggregation_port: "{{host_in_memory_aggregation_port}}"
 
   # HTTPS settings
   truststore.path : "{{metric_truststore_path}}"

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm-metrics2.properties.j2 b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm-metrics2.properties.j2
index 0501039..1dedffc 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/templates/storm-metrics2.properties.j2
@@ -25,6 +25,8 @@ sendInterval={{metrics_report_interval}}000
 clusterReporterAppId=nimbus
 instanceId={{cluster_name}}
 set.instanceId={{set_instanceId}}
+host_in_memory_aggregation = {{host_in_memory_aggregation}}
+host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index 7282bb5..3488e75 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -164,6 +164,9 @@ if has_metric_collector:
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+
 # Cluster Zookeeper quorum
 zookeeper_quorum = None
 if has_zk_host:

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
index 1b02a97..1f8499f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -77,6 +77,8 @@ resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
 *.sink.timeline.port={{metric_collector_port}}
 *.sink.timeline.instanceId={{cluster_name}}
 *.sink.timeline.set.instanceId={{set_instanceId}}
+*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 *.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/hadoop-metrics2.properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/hadoop-metrics2.properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/hadoop-metrics2.properties.xml
index fae61d3..4b03880 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/hadoop-metrics2.properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HDFS/configuration/hadoop-metrics2.properties.xml
@@ -88,6 +88,8 @@ resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
 *.sink.timeline.port={{metric_collector_port}}
 *.sink.timeline.instanceId={{cluster_name}}
 *.sink.timeline.set.instanceId={{set_instanceId}}
+*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 *.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
index 678bbde..a3830f7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
@@ -158,6 +158,8 @@ if has_metric_collector:
   pass
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
 
 # Cluster Zookeeper quorum
 zookeeper_quorum = None

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
index 1b02a97..1f8499f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -77,6 +77,8 @@ resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
 *.sink.timeline.port={{metric_collector_port}}
 *.sink.timeline.instanceId={{cluster_name}}
 *.sink.timeline.set.instanceId={{set_instanceId}}
+*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
 
 # HTTPS properties
 *.sink.timeline.truststore.path = {{metric_truststore_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/test/java/org/apache/ambari/server/metric/system/impl/TestAmbariMetricsSinkImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/metric/system/impl/TestAmbariMetricsSinkImpl.java b/ambari-server/src/test/java/org/apache/ambari/server/metric/system/impl/TestAmbariMetricsSinkImpl.java
index 4a60892..3ee8ebc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/metric/system/impl/TestAmbariMetricsSinkImpl.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/metric/system/impl/TestAmbariMetricsSinkImpl.java
@@ -74,6 +74,16 @@ public class TestAmbariMetricsSinkImpl extends AbstractTimelineMetricsSink imple
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return true;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return 61888;
+  }
+
+  @Override
   public void init(MetricsConfiguration configuration) {
 
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/contrib/management-packs/hdf-ambari-mpack/src/main/resources/stacks/HDF/2.0/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/hdf-ambari-mpack/src/main/resources/stacks/HDF/2.0/hooks/before-START/scripts/params.py b/contrib/management-packs/hdf-ambari-mpack/src/main/resources/stacks/HDF/2.0/hooks/before-START/scripts/params.py
index 6a70675..8cc876f 100644
--- a/contrib/management-packs/hdf-ambari-mpack/src/main/resources/stacks/HDF/2.0/hooks/before-START/scripts/params.py
+++ b/contrib/management-packs/hdf-ambari-mpack/src/main/resources/stacks/HDF/2.0/hooks/before-START/scripts/params.py
@@ -135,6 +135,8 @@ if has_metric_collector:
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
 #hadoop params
 
 if has_namenode or dfs_type == 'HCFS':


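A quick illustration of what the change above produces: each sink resolves the two new settings from ams-site through default(), falling back to in-memory aggregation enabled on port 61888 when the properties are unset. With those defaults, the rendered hadoop-metrics2.properties fragment would look roughly like this (illustrative values, not taken from a live cluster):

    *.sink.timeline.host_in_memory_aggregation = true
    *.sink.timeline.host_in_memory_aggregation_port = 61888
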
[23/25] ambari git commit: AMBARI-21031. Add docker support for infra manager (oleewere)

Posted by jl...@apache.org.
AMBARI-21031. Add docker support for infra manager (oleewere)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8bf136a4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8bf136a4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8bf136a4

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 8bf136a4b78b495de849565400f24a7cc2cbd834
Parents: 1568f80
Author: oleewere <ol...@gmail.com>
Authored: Tue May 16 13:57:12 2017 +0200
Committer: oleewere <ol...@gmail.com>
Committed: Wed May 17 19:35:46 2017 +0200

----------------------------------------------------------------------
 ambari-infra/ambari-infra-assembly/pom.xml      |  6 +-
 ambari-infra/ambari-infra-manager/README.md     |  6 ++
 ambari-infra/ambari-infra-manager/build.xml     |  1 +
 .../ambari-infra-manager/docker/Dockerfile      | 52 ++++++++++++
 .../ambari-infra-manager/docker/bin/start.sh    | 21 +++++
 .../docker/infra-manager-docker.sh              | 85 ++++++++++++++++++++
 .../src/main/resources/infra-manager-env.sh     | 18 +++++
 .../src/main/resources/infraManager.sh          |  2 +-
 8 files changed, 188 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8bf136a4/ambari-infra/ambari-infra-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-assembly/pom.xml b/ambari-infra/ambari-infra-assembly/pom.xml
index 550d97c..fafef7e 100644
--- a/ambari-infra/ambari-infra-assembly/pom.xml
+++ b/ambari-infra/ambari-infra-assembly/pom.xml
@@ -140,6 +140,7 @@
                           <excludes>
                             <exclude>log4j.xml</exclude>
                             <exclude>infra-manager.properties</exclude>
+                            <exclude>infra-manager-env.sh</exclude>
                           </excludes>
                         </source>
                       </sources>
@@ -152,6 +153,7 @@
                           <includes>
                             <include>log4j.xml</include>
                             <include>infra-manager.properties</include>
+                            <include>infra-manager-env.sh</include>
                           </includes>
                         </source>
                       </sources>
@@ -341,7 +343,7 @@
                         <prefix>${infra-manager.mapping.path}</prefix>
                       </mapper>
                       <excludes>
-                        log4j.xml,infra-manager.properties
+                        log4j.xml,infra-manager.properties,infra-manager-env.sh
                       </excludes>
                     </data>
                     <data>
@@ -355,7 +357,7 @@
                         <filemode>644</filemode>
                       </mapper>
                       <includes>
-                        log4j.xml,infra-manager.properties
+                        log4j.xml,infra-manager.properties,infra-manager-env.sh
                       </includes>
                     </data>
                   </dataSet>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8bf136a4/ambari-infra/ambari-infra-manager/README.md
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/README.md b/ambari-infra/ambari-infra-manager/README.md
index 033bbb2..d3527c4 100644
--- a/ambari-infra/ambari-infra-manager/README.md
+++ b/ambari-infra/ambari-infra-manager/README.md
@@ -22,4 +22,10 @@ TODO
 ## Build & Run Application
 ```bash
 mvn clean package exec:java
+```
+
+## Build & Run Application in docker container
+```bash
+cd docker
+./infra-manager-docker.sh
 ```
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/8bf136a4/ambari-infra/ambari-infra-manager/build.xml
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/build.xml b/ambari-infra/ambari-infra-manager/build.xml
index c7954d9..3d0f4da 100644
--- a/ambari-infra/ambari-infra-manager/build.xml
+++ b/ambari-infra/ambari-infra-manager/build.xml
@@ -35,6 +35,7 @@
     </copy>
     <copy todir="target/package" includeEmptyDirs="no">
       <fileset file="src/main/resources/infraManager.sh"/>
+      <fileset file="src/main/resources/infra-manager-env.sh"/>
       <fileset file="target/classes/infra-manager.properties"/>
       <fileset file="target/classes/log4j.xml"/>
     </copy>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8bf136a4/ambari-infra/ambari-infra-manager/docker/Dockerfile
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/docker/Dockerfile b/ambari-infra/ambari-infra-manager/docker/Dockerfile
new file mode 100644
index 0000000..adb584a
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/docker/Dockerfile
@@ -0,0 +1,52 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+FROM centos:centos6
+
+RUN echo root:changeme | chpasswd
+
+RUN yum clean all -y && yum update -y
+RUN yum -y install vim wget rpm-build sudo which telnet tar openssh-server openssh-clients ntp git httpd lsof
+RUN rpm -e --nodeps --justdb glibc-common
+RUN yum -y install glibc-common
+
+ENV HOME /root
+
+#Install JAVA
+ENV JAVA_VERSION 8u31
+ENV BUILD_VERSION b13
+RUN wget --no-cookies --no-check-certificate --header "Cookie: oraclelicense=accept-securebackup-cookie" "http://download.oracle.com/otn-pub/java/jdk/$JAVA_VERSION-$BUILD_VERSION/jdk-$JAVA_VERSION-linux-x64.rpm" -O jdk-8-linux-x64.rpm
+RUN rpm -ivh jdk-8-linux-x64.rpm
+ENV JAVA_HOME /usr/java/default/
+
+#Install Maven
+RUN mkdir -p /opt/maven
+WORKDIR /opt/maven
+RUN wget http://archive.apache.org/dist/maven/maven-3/3.3.1/binaries/apache-maven-3.3.1-bin.tar.gz
+RUN tar -xvzf /opt/maven/apache-maven-3.3.1-bin.tar.gz
+RUN rm -rf /opt/maven/apache-maven-3.3.1-bin.tar.gz
+
+ENV M2_HOME /opt/maven/apache-maven-3.3.1
+ENV MAVEN_OPTS -Xmx2048m
+ENV PATH $PATH:$JAVA_HOME/bin:$M2_HOME/bin
+
+# SSH key
+RUN ssh-keygen -f /root/.ssh/id_rsa -t rsa -N ''
+RUN cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys
+RUN chmod 600 /root/.ssh/authorized_keys
+RUN sed -ri 's/UsePAM yes/UsePAM no/g' /etc/ssh/sshd_config
+
+ADD bin/start.sh /root/start.sh
+RUN chmod +x /root/start.sh
+
+WORKDIR /root
+CMD /root/start.sh
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/8bf136a4/ambari-infra/ambari-infra-manager/docker/bin/start.sh
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/docker/bin/start.sh b/ambari-infra/ambari-infra-manager/docker/bin/start.sh
new file mode 100755
index 0000000..076c06f
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/docker/bin/start.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+export INFRA_MANAGER_OPTS="-Xdebug -Xrunjdwp:transport=dt_socket,address=5007,server=y,suspend=n"
+touch /root/infra-manager.log
+/root/ambari-infra-manager/infraManager.sh --port 61890 > /root/infra-manager.log
+tail -f /root/infra-manager.log
+

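Worth noting: start.sh enables a JDWP agent on port 5007 (server=y, suspend=n), and the docker run command below maps that port to the host, so a debugger can attach from outside the container. A minimal sketch, assuming the default port mapping above:

    # attach the JDK's command-line debugger to the running container
    jdb -attach localhost:5007
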
http://git-wip-us.apache.org/repos/asf/ambari/blob/8bf136a4/ambari-infra/ambari-infra-manager/docker/infra-manager-docker.sh
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/docker/infra-manager-docker.sh b/ambari-infra/ambari-infra-manager/docker/infra-manager-docker.sh
new file mode 100755
index 0000000..87d6b8a
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/docker/infra-manager-docker.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+sdir="`dirname \"$0\"`"
+: ${1:?"argument is missing: (start|stop|build-and-run|build|build-docker-and-run|build-mvn-and-run|build-docker-only|build-mvn-only)"}
+command="$1"
+
+function build_infra_manager_container() {
+  pushd $sdir
+  docker build -t ambari-infra-manager:v1.0 .
+  popd
+}
+
+function build_infra_manager_project() {
+  pushd $sdir/../
+  mvn clean package -DskipTests
+  popd
+}
+
+function kill_infra_manager_container() {
+  echo "Try to remove infra manager container if exists ..."
+  docker rm -f infra-manager
+}
+
+function start_infra_manager_container() {
+  echo "Start infra manager container ..."
+  pushd $sdir/../
+  local AMBARI_INFRA_MANAGER_LOCATION=$(pwd)
+  popd
+  kill_infra_manager_container
+  docker run -d --name infra-manager --hostname infra-manager.apache.org \
+    -v $AMBARI_INFRA_MANAGER_LOCATION/target/package:/root/ambari-infra-manager -p 61890:61890 -p 5007:5007 \
+    ambari-infra-manager:v1.0
+  ip_address=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' infra-manager)
+  echo "Ambari Infra Manager container started on $ip_address (for Mac OSX route to boot2docker/docker-machine VM address, e.g.: 'sudo route add -net 172.17.0.0/16 192.168.59.103')"
+  echo "You can follow Ambari Infra Manager logs with 'docker logs -f infra-manager' command"
+}
+
+case $command in
+  "build-and-run")
+     build_infra_manager_project
+     build_infra_manager_container
+     start_infra_manager_container
+     ;;
+  "build")
+     build_infra_manager_project
+     start_infra_manager_container
+     ;;
+  "build-docker-and-run")
+     build_infra_manager_container
+     start_infra_manager_container
+     ;;
+  "build-mvn-and-run")
+     build_infra_manager_project
+     build_infra_manager_container
+     ;;
+  "build-docker-only")
+     build_infra_manager_container
+     ;;
+  "build-mvn-only")
+     build_infra_manager_project
+     ;;
+  "start")
+     start_infra_manager_container
+     ;;
+  "stop")
+     kill_infra_manager_container
+     ;;
+   *)
+   echo "Available commands: (start|stop|build-and-run|build|build-docker-and-run|build-mvn-and-run|build-docker-only|build-mvn-only)"
+   ;;
+esac
\ No newline at end of file

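For day-to-day use, the helper script wires these targets together; a typical flow might be (commands taken from the script's own usage string, run from a checkout of the ambari repo):

    cd ambari-infra/ambari-infra-manager/docker
    ./infra-manager-docker.sh build-and-run   # mvn package + docker build + start container
    ./infra-manager-docker.sh stop            # remove the running container
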
http://git-wip-us.apache.org/repos/asf/ambari/blob/8bf136a4/ambari-infra/ambari-infra-manager/src/main/resources/infra-manager-env.sh
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/resources/infra-manager-env.sh b/ambari-infra/ambari-infra-manager/src/main/resources/infra-manager-env.sh
new file mode 100644
index 0000000..c7e11c3
--- /dev/null
+++ b/ambari-infra/ambari-infra-manager/src/main/resources/infra-manager-env.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Extend with java options or system properties. e.g.: INFRA_MANAGER_OPTS="-Xdebug -Xrunjdwp:transport=dt_socket,address=5007,server=y,suspend=n"
+export INFRA_MANAGER_OPTS=""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/8bf136a4/ambari-infra/ambari-infra-manager/src/main/resources/infraManager.sh
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/src/main/resources/infraManager.sh b/ambari-infra/ambari-infra-manager/src/main/resources/infraManager.sh
index 9f40d5c..65287b2 100644
--- a/ambari-infra/ambari-infra-manager/src/main/resources/infraManager.sh
+++ b/ambari-infra/ambari-infra-manager/src/main/resources/infraManager.sh
@@ -17,4 +17,4 @@
 JVM="java"
 sdir="`dirname \"$0\"`"
 
-PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "/etc/ambari-infra-manager/conf:$sdir:$sdir/libs/*" org.apache.ambari.infra.InfraManager ${1+"$@"}
\ No newline at end of file
+PATH=$JAVA_HOME/bin:$PATH nohup $JVM -classpath "/etc/ambari-infra-manager/conf:$sdir:$sdir/libs/*" $INFRA_MANAGER_OPTS org.apache.ambari.infra.InfraManager ${1+"$@"} &
\ No newline at end of file

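One detail of the launcher change: infraManager.sh expands $INFRA_MANAGER_OPTS but does not source infra-manager-env.sh itself, so the caller is expected to export the variable first (the docker start.sh above does exactly that). A minimal sketch, assuming both files land in /usr/lib/ambari-infra-manager (the actual prefix comes from the rpm/deb mapping path):

    . /usr/lib/ambari-infra-manager/infra-manager-env.sh   # exports INFRA_MANAGER_OPTS
    /usr/lib/ambari-infra-manager/infraManager.sh --port 61890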

[05/25] ambari git commit: AMBARI-21023. HDP 3.0 TP - create service definition for Sqoop with configs, kerberos, widgets, etc.(vbrodetsky)

Posted by jl...@apache.org.
AMBARI-21023. HDP 3.0 TP - create service definition for Sqoop with configs, kerberos, widgets, etc.(vbrodetsky)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b3339123
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b3339123
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b3339123

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: b33391236b385e2e4620031b9773261bce8efdf6
Parents: 47b845f
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Tue May 16 22:16:59 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Tue May 16 22:16:59 2017 +0300

----------------------------------------------------------------------
 .../sqoop-atlas-application.properties.xml      |  47 +++++++
 .../SQOOP/1.4.4.3.0/configuration/sqoop-env.xml |  87 ++++++++++++
 .../1.4.4.3.0/configuration/sqoop-site.xml      |  38 ++++++
 .../SQOOP/1.4.4.3.0/kerberos.json               |  20 +++
 .../SQOOP/1.4.4.3.0/metainfo.xml                | 115 ++++++++++++++++
 .../SQOOP/1.4.4.3.0/package/scripts/__init__.py |  19 +++
 .../SQOOP/1.4.4.3.0/package/scripts/params.py   |  27 ++++
 .../1.4.4.3.0/package/scripts/params_linux.py   | 135 +++++++++++++++++++
 .../1.4.4.3.0/package/scripts/params_windows.py |  30 +++++
 .../1.4.4.3.0/package/scripts/service_check.py  |  62 +++++++++
 .../SQOOP/1.4.4.3.0/package/scripts/sqoop.py    | 124 +++++++++++++++++
 .../1.4.4.3.0/package/scripts/sqoop_client.py   |  66 +++++++++
 .../SQOOP/1.4.4.3.0/role_command_order.json     |   6 +
 .../stacks/HDP/3.0/services/SQOOP/metainfo.xml  |  27 ++++
 14 files changed, 803 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b3339123/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-atlas-application.properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-atlas-application.properties.xml b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-atlas-application.properties.xml
new file mode 100644
index 0000000..1364776
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-atlas-application.properties.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <!--
+  These two security properties will only be read if the cluster is Kerberized, but ok to add them even without Kerberos.
+  It's important to have at least one property in this config file so it gets added while merging configs during EU/RU
+  from an earlier stack to HDP 2.5+.
+  Also, it allows a fresh install with Sqoop to expose this config type in the UI.
+  -->
+  <property>
+    <name>atlas.jaas.KafkaClient.option.useTicketCache</name>
+    <value>true</value>
+    <description>
+      Set this to "true" if you want the TGT to be obtained from the ticket cache.
+      Set this option to "false" if you do not want this module to use the ticket cache.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>atlas.jaas.KafkaClient.option.renewTicket</name>
+    <value>true</value>
+    <description>
+      Set this to "true" if you want the TGT to renew the ticket when it expires.
+      Set this option to "false" if you do not want this module to renew tickets.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b3339123/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-env.xml b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-env.xml
new file mode 100644
index 0000000..6e16ab33
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-env.xml
@@ -0,0 +1,87 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <!-- sqoop-env.sh -->
+  <property>
+    <name>sqoop.atlas.hook</name>
+    <value>false</value>
+    <display-name>Enable Atlas Hook</display-name>
+    <description>Enable Atlas Hook</description>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.rest.address</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>sqoop-env template</display-name>
+    <description>This is the jinja template for sqoop-env.sh file</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+#Set path to where bin/hadoop is available
+export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+#set the path to where bin/hbase is available
+export HBASE_HOME=${HBASE_HOME:-{{hbase_home}}}
+
+#Set the path to where bin/hive is available
+export HIVE_HOME=${HIVE_HOME:-{{hive_home}}}
+
+#Set the path for where zookeper config dir is
+export ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}
+
+# add libthrift in hive to sqoop class path first so hive imports work
+export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2&gt; /dev/null`:${SQOOP_USER_CLASSPATH}"
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>sqoop_user</name>
+    <display-name>Sqoop User</display-name>
+    <description>User to run Sqoop as</description>
+    <property-type>USER</property-type>
+    <value>sqoop</value>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>jdbc_drivers</name>
+    <description>Comma separated list of additional JDBC drivers class names</description>
+    <value> </value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b3339123/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-site.xml b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-site.xml
new file mode 100644
index 0000000..389550d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/configuration/sqoop-site.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <!-- This property was valid in HDP 2.3 and 2.4, but removed in HDP 2.5 -->
+  <property>
+    <name>sqoop.job.data.publish.class</name>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>sqoop-env</type>
+        <name>sqoop.atlas.hook</name>
+      </property>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.rest.address</name>
+      </property>
+    </depends-on>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b3339123/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/kerberos.json
new file mode 100644
index 0000000..de12e7c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/kerberos.json
@@ -0,0 +1,20 @@
+{
+  "services": [
+    {
+      "name": "SQOOP",
+      "configurations": [
+        {
+          "sqoop-atlas-application.properties": {
+            "atlas.jaas.KafkaClient.option.useTicketCache": "true",
+            "atlas.jaas.KafkaClient.option.renewTicket": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "SQOOP"
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b3339123/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/metainfo.xml
new file mode 100644
index 0000000..999d93a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/metainfo.xml
@@ -0,0 +1,115 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SQOOP</name>
+      <displayName>Sqoop</displayName>
+      <comment>Tool for transferring bulk data between Apache Hadoop and
+        structured data stores such as relational databases
+      </comment>
+      <version>1.4.4.3.0</version>
+
+      <components>
+        <component>
+          <name>SQOOP</name>
+          <displayName>Sqoop Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/sqoop_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>sqoop-site.xml</fileName>
+              <dictionaryName>sqoop-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>sqoop-env.sh</fileName>
+              <dictionaryName>sqoop-env</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>mysql-connector-java</name>
+              <skipUpgrade>true</skipUpgrade>
+              <condition>should_install_mysql_connector</condition>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>sqoop_${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>sqoop-${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      
+      <requiredServices>
+        <service>HDFS</service>
+      </requiredServices>
+      
+      <configuration-dependencies>
+        <config-type>sqoop-env</config-type>
+        <config-type>sqoop-site</config-type>
+        <config-type>application.properties</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b3339123/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/b3339123/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params.py
new file mode 100644
index 0000000..61573ee
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params.py
@@ -0,0 +1,27 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/b3339123/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params_linux.py
new file mode 100644
index 0000000..c1138b3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params_linux.py
@@ -0,0 +1,135 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.get_kinit_path import get_kinit_path
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.expect import expect
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
+
+
+# a map of the Ambari role to the component name
+# for use with <stack-root>/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'SQOOP' : 'sqoop-client'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "SQOOP")
+
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+
+# Needed since this is an Atlas Hook service.
+cluster_name = config['clusterName']
+
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
+agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+
+# default hadoop params
+sqoop_conf_dir = "/usr/lib/sqoop/conf"
+sqoop_lib = "/usr/lib/sqoop/lib"
+hadoop_home = '/usr/lib/hadoop'
+hbase_home = "/usr/lib/hbase"
+hive_home = "/usr/lib/hive"
+sqoop_bin_dir = "/usr/bin"
+zoo_conf_dir = "/etc/zookeeper"
+
+# For stack versions supporting rolling upgrade
+if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+  sqoop_conf_dir = format("{stack_root}/current/sqoop-client/conf")
+  sqoop_lib = format("{stack_root}/current/sqoop-client/lib")
+  hadoop_home = format("{stack_root}/current/hadoop-client")
+  hbase_home = format("{stack_root}/current/hbase-client")
+  hive_home = format("{stack_root}/current/hive-client")
+  sqoop_bin_dir = format("{stack_root}/current/sqoop-client/bin/")
+  zoo_conf_dir = format("{stack_root}/current/zookeeper-client/conf")
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+user_group = config['configurations']['cluster-env']['user_group']
+sqoop_env_sh_template = config['configurations']['sqoop-env']['content']
+
+sqoop_user = config['configurations']['sqoop-env']['sqoop_user']
+
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+#JDBC driver jar name
+sqoop_jdbc_drivers_dict = []
+sqoop_jdbc_drivers_name_dict = {}
+sqoop_jdbc_drivers_to_remove = {}
+if "jdbc_drivers" in config['configurations']['sqoop-env']:
+  sqoop_jdbc_drivers = config['configurations']['sqoop-env']['jdbc_drivers'].split(',')
+
+  for driver_name in sqoop_jdbc_drivers:
+    previous_jdbc_jar_name = None
+    driver_name = driver_name.strip()
+    if driver_name:
+      if driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver":
+        jdbc_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
+        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
+        jdbc_driver_name = "mssql"
+      elif driver_name == "com.mysql.jdbc.Driver":
+        jdbc_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
+        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
+        jdbc_driver_name = "mysql"
+      elif driver_name == "org.postgresql.Driver":
+        jdbc_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
+        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
+        jdbc_driver_name = "postgres"
+      elif driver_name == "oracle.jdbc.driver.OracleDriver":
+        jdbc_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
+        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
+        jdbc_driver_name = "oracle"
+      elif driver_name == "org.hsqldb.jdbc.JDBCDriver":
+        jdbc_name = default("/hostLevelParams/custom_hsqldb_jdbc_name", None)
+        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_hsqldb_jdbc_name", None)
+        jdbc_driver_name = "hsqldb"
+    else:
+      continue
+    sqoop_jdbc_drivers_dict.append(jdbc_name)
+    sqoop_jdbc_drivers_to_remove[jdbc_name] = previous_jdbc_jar_name
+    sqoop_jdbc_drivers_name_dict[jdbc_name] = jdbc_driver_name
+jdk_location = config['hostLevelParams']['jdk_location']
+
+
+########################################################
+############# Atlas related params #####################
+########################################################
+#region Atlas Hooks
+sqoop_atlas_application_properties = default('/configurations/sqoop-atlas-application.properties', {})
+enable_atlas_hook = default('/configurations/sqoop-env/sqoop.atlas.hook', False)
+atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
+#endregion

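As a usage note for the driver handling above: jdbc_drivers in sqoop-env holds a comma-separated list of JDBC driver class names, and each recognized class is mapped to a short name (mssql, mysql, postgres, oracle, hsqldb) that keys the uploaded jar parameters under /hostLevelParams. An illustrative sqoop-env value (assuming the matching custom_*_jdbc_name parameters were registered when the drivers were uploaded to Ambari):

    jdbc_drivers = com.mysql.jdbc.Driver, org.postgresql.Driver

With that value, sqoop_jdbc_drivers_name_dict would map the mysql and postgres jar names to their short driver names, and unrecognized class names are simply skipped.
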
http://git-wip-us.apache.org/repos/asf/ambari/blob/b3339123/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params_windows.py
new file mode 100644
index 0000000..f930765
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/params_windows.py
@@ -0,0 +1,30 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script import Script
+import os
+
+config = Script.get_config()
+
+sqoop_user = "sqoop"
+
+stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"], ".."))
+sqoop_env_cmd_template = config['configurations']['sqoop-env']['content']
+sqoop_home_dir = os.environ["SQOOP_HOME"]
+sqoop_conf_dir = os.path.join(sqoop_home_dir, "conf")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b3339123/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/service_check.py
new file mode 100644
index 0000000..bb503f5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/service_check.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+
+from resource_management.libraries.script.script import Script
+from resource_management.core.resources import Execute
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import format
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+import os
+
+class SqoopServiceCheck(Script):
+  pass
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class SqoopServiceCheckDefault(SqoopServiceCheck):
+
+  def get_component_name(self):
+    return "sqoop-server"
+
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    if params.security_enabled:
+      Execute(format("{kinit_path_local}  -kt {smoke_user_keytab} {smokeuser_principal}"),
+              user = params.smokeuser,
+      )
+    Execute("sqoop version",
+            user = params.smokeuser,
+            path = params.sqoop_bin_dir,
+            logoutput = True
+    )
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class SqoopServiceCheckWindows(SqoopServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
+    service = "SQOOP"
+    Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True)
+
+if __name__ == "__main__":
+  SqoopServiceCheck().execute()
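
The service check above is split per OS family via @OsFamilyImpl. As a rough mental model (a toy decorator registry, not Ambari's actual dispatch mechanism), the selection works like this:

    # toy decorator registry illustrating per-OS implementation selection
    import platform

    _impls = {}

    def os_impl(family):
      def register(cls):
        _impls[family] = cls
        return cls
      return register

    @os_impl('winsrv')
    class ServiceCheckWindows(object):
      def service_check(self):
        print "running Windows smoke test"

    @os_impl('default')
    class ServiceCheckDefault(object):
      def service_check(self):
        print "running default smoke test"

    family = 'winsrv' if platform.system() == 'Windows' else 'default'
    _impls[family]().service_check()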

http://git-wip-us.apache.org/repos/asf/ambari/blob/b3339123/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/sqoop.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/sqoop.py
new file mode 100644
index 0000000..436402c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/sqoop.py
@@ -0,0 +1,124 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+# Python Imports
+import os
+
+# Local Imports
+from resource_management.core.source import InlineTemplate, DownloadSource
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.get_config import get_config
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.core.resources.system import File, Link, Directory
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook, setup_atlas_jar_symlinks
+from ambari_commons.constants import SERVICE
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def sqoop(type=None):
+  import params
+  File(os.path.join(params.sqoop_conf_dir, "sqoop-env.cmd"),
+       content=InlineTemplate(params.sqoop_env_cmd_template)
+  )
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def sqoop(type=None):
+  import params
+  Link(params.sqoop_lib + "/mysql-connector-java.jar",
+       to = '/usr/share/java/mysql-connector-java.jar'
+  )
+
+  jdbc_connector()
+  
+  Directory(params.sqoop_conf_dir,
+            owner = params.sqoop_user,
+            group = params.user_group,
+            create_parents = True
+  )
+
+  configs = {}
+  sqoop_site_config = get_config('sqoop-site')
+  if sqoop_site_config:
+    configs.update(sqoop_site_config)
+
+    XmlConfig("sqoop-site.xml",
+              conf_dir = params.sqoop_conf_dir,
+              configurations = configs,
+              configuration_attributes=params.config['configuration_attributes']['sqoop-site'],
+              owner = params.sqoop_user,
+              group = params.user_group
+              )
+
+  # Generate atlas-application.properties.xml file and symlink the hook jars
+  if params.enable_atlas_hook:
+    atlas_hook_filepath = os.path.join(params.sqoop_conf_dir, params.atlas_hook_filename)
+    setup_atlas_hook(SERVICE.SQOOP, params.sqoop_atlas_application_properties, atlas_hook_filepath, params.sqoop_user, params.user_group)
+    setup_atlas_jar_symlinks("sqoop", params.sqoop_lib)
+
+
+  File(format("{sqoop_conf_dir}/sqoop-env.sh"),
+    owner=params.sqoop_user,
+    group = params.user_group,
+    content=InlineTemplate(params.sqoop_env_sh_template)
+  )
+  update_config_permissions(["sqoop-env-template.sh",
+                             "sqoop-site-template.xml",
+                             "sqoop-site.xml"])
+
+def update_config_permissions(names):
+  import params
+  for filename in names:
+    full_filename = os.path.join(params.sqoop_conf_dir, filename)
+    File(full_filename,
+          owner = params.sqoop_user,
+          group = params.user_group,
+          only_if = format("test -e {full_filename}")
+    )
+
+def jdbc_connector():
+  import params
+  from urllib2 import HTTPError
+  from resource_management import Fail
+  for jar_name in params.sqoop_jdbc_drivers_dict:
+    if not jar_name or 'mysql' in jar_name:
+      continue
+    downloaded_custom_connector = format("{sqoop_lib}/{jar_name}")
+    custom_connector_to_remove = format("{sqoop_lib}/" + str(params.sqoop_jdbc_drivers_to_remove[jar_name]))
+    jdbc_driver_label = params.sqoop_jdbc_drivers_name_dict[jar_name]
+    driver_curl_source = format("{jdk_location}/{jar_name}")
+    environment = {
+      "no_proxy": format("{ambari_server_hostname}")
+    }
+    try:
+      if custom_connector_to_remove and os.path.isfile(custom_connector_to_remove):
+        File(custom_connector_to_remove, action='delete')
+
+      File(downloaded_custom_connector,
+           content = DownloadSource(driver_curl_source),
+           mode = 0644,
+      )
+    except HTTPError:
+      error_string = format("Could not download {driver_curl_source}\n\
+                 Please upload the JDBC driver to the server by running:\n\
+                 ambari-server setup --jdbc-db={jdbc_driver_label} --jdbc-driver=<PATH TO DRIVER>\n\
+                 on {ambari_server_hostname}")
+      raise Fail(error_string)
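
jdbc_connector() above pulls each custom driver jar from the Ambari server's resources URL into the Sqoop lib directory. A standalone sketch of that download step, with hypothetical values for jdk_location and sqoop_lib (these are assumptions for illustration, not Ambari defaults):

    # hedged sketch of the driver download, using urllib2 as the module above does
    import os
    import urllib2

    jdk_location = 'http://ambari.example.com:8080/resources'  # assumed resources URL
    sqoop_lib = '/tmp/sqoop-lib'                               # assumed target directory
    jar_name = 'postgresql-jdbc.jar'

    if not os.path.isdir(sqoop_lib):
      os.makedirs(sqoop_lib)
    try:
      response = urllib2.urlopen('%s/%s' % (jdk_location, jar_name))
      with open(os.path.join(sqoop_lib, jar_name), 'wb') as target:
        target.write(response.read())
    except urllib2.URLError as e:  # HTTPError is a subclass, so both cases are caught
      print "Could not download driver: %s" % e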

http://git-wip-us.apache.org/repos/asf/ambari/blob/b3339123/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/sqoop_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/sqoop_client.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/sqoop_client.py
new file mode 100644
index 0000000..d420fab
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/package/scripts/sqoop_client.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from sqoop import sqoop
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+
+class SqoopClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    sqoop(type='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class SqoopClientDefault(SqoopClient):
+  def get_component_name(self):
+    return "sqoop-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version): 
+      conf_select.select(params.stack_name, "sqoop", params.version)
+      stack_select.select("sqoop-client", params.version)
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class SqoopClientWindows(SqoopClient):
+  pass
+
+if __name__ == "__main__":
+  SqoopClient().execute()
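
pre_upgrade_restart() above only re-selects binaries when the target stack supports rolling upgrade. A simplified predicate sketch (the real check_stack_feature consults stack metadata; the minimum version below is only an assumption for illustration):

    # simplified stand-in for the check_stack_feature gate in pre_upgrade_restart
    def check_stack_feature(feature, version):
      minimums = {'rolling_upgrade': (2, 2)}  # assumed minimum, for illustration only
      parts = tuple(int(p) for p in version.split('.')[:2])
      return parts >= minimums[feature]

    if check_stack_feature('rolling_upgrade', '2.5.0.0'):
      print "re-pointing sqoop-client symlinks at the new stack version"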

http://git-wip-us.apache.org/repos/asf/ambari/blob/b3339123/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/role_command_order.json
new file mode 100644
index 0000000..f9a6cb4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/role_command_order.json
@@ -0,0 +1,6 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for SQOOP",
+    "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"]
+  }
+}
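
The mapping above tells Ambari which commands must finish before others may start. A quick way to inspect such a file (a hedged sketch; the relative path is assumed):

    # hedged sketch: listing the blockers declared for the SQOOP service check
    import json

    with open('role_command_order.json') as f:  # path assumed for illustration
      deps = json.load(f)['general_deps']

    blockers = deps.get('SQOOP_SERVICE_CHECK-SERVICE_CHECK', [])
    print "SQOOP service check waits for: %s" % ', '.join(blockers)
    # -> SQOOP service check waits for: NODEMANAGER-START, RESOURCEMANAGER-START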

http://git-wip-us.apache.org/repos/asf/ambari/blob/b3339123/ambari-server/src/main/resources/stacks/HDP/3.0/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/SQOOP/metainfo.xml
new file mode 100644
index 0000000..757bce5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/SQOOP/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>SQOOP</name>
+            <version>1.4.4.3.0</version>
+            <extends>common-services/SQOOP/1.4.4.3.0</extends>
+        </service>
+    </services>
+</metainfo>
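
The stack-level metainfo.xml above only declares a version and an <extends> pointer back to the common-services definition. A short sketch for reading that inheritance link (path assumed for illustration):

    # hedged sketch: reading the <extends> pointer from a stack metainfo.xml
    import xml.etree.ElementTree as ET

    tree = ET.parse('metainfo.xml')  # path assumed for illustration
    for service in tree.getroot().iter('service'):
      print "%s %s extends %s" % (service.findtext('name'),
                                  service.findtext('version'),
                                  service.findtext('extends'))
    # -> SQOOP 1.4.4.3.0 extends common-services/SQOOP/1.4.4.3.0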


[15/25] ambari git commit: AMBARI-21041. Pause Upgrade button is present in the wizard even after Upgrade is finalized (alexantonenko)

Posted by jl...@apache.org.
AMBARI-21041. Pause Upgrade button is present in the wizard even after Upgrade is finalized (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/99d4ca1e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/99d4ca1e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/99d4ca1e

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 99d4ca1e7171648086a3c0536cd6badbac172707
Parents: 3b94d3c
Author: Alex Antonenko <hi...@gmail.com>
Authored: Wed May 17 15:27:01 2017 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Wed May 17 15:51:50 2017 +0300

----------------------------------------------------------------------
 ambari-web/app/app.js                                         | 7 +++++++
 .../controllers/main/admin/stack_and_upgrade_controller.js    | 2 +-
 2 files changed, 8 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/99d4ca1e/ambari-web/app/app.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/app.js b/ambari-web/app/app.js
index 6d02603..c22d71c 100644
--- a/ambari-web/app/app.js
+++ b/ambari-web/app/app.js
@@ -69,6 +69,13 @@ module.exports = Em.Application.create({
   upgradeState: 'INIT',
 
   /**
+   * Check if the upgrade is in the INIT state.
+   * 'INIT' is set both before an upgrade starts and after it is finalized.
+   * @type {boolean}
+   */
+  upgradeInit: Em.computed.equal('upgradeState', 'INIT'),
+
+  /**
    * flag is true when upgrade process is running
    * @returns {boolean}
    */

http://git-wip-us.apache.org/repos/asf/ambari/blob/99d4ca1e/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
index 94cdf1c..0f2efb0 100644
--- a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
+++ b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
@@ -71,7 +71,7 @@ App.MainAdminStackAndUpgradeController = Em.Controller.extend(App.LocalStorage,
   /**
    * @type {boolean}
    */
-  showPauseButton: Em.computed.and('!App.upgradeSuspended', '!App.upgradeCompleted'),
+  showPauseButton: Em.computed.and('!App.upgradeSuspended', '!App.upgradeCompleted', '!App.upgradeInit'),
 
   /**
    * @type {boolean}
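
In plain terms, the fix hides the Pause button whenever the upgrade is suspended, completed, or back in INIT, so it only shows while an upgrade is actually in flight. A Python mirror of the logic (property names taken from the diff; Em.computed.and over negated dependent keys is assumed to evaluate as a plain boolean AND of the negations):

    # hedged mirror of showPauseButton = Em.computed.and('!App.upgradeSuspended', '!App.upgradeCompleted', '!App.upgradeInit')
    def show_pause_button(upgrade_suspended, upgrade_completed, upgrade_state):
      upgrade_init = (upgrade_state == 'INIT')
      return (not upgrade_suspended) and (not upgrade_completed) and (not upgrade_init)

    print show_pause_button(False, False, 'IN_PROGRESS')  # True: an upgrade is actually running
    print show_pause_button(False, False, 'INIT')         # False: finalized or never started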


[08/25] ambari git commit: AMBARI-21032. HDP 3.0 TP - create service definition for Knox with configs, kerberos, widgets, etc.(vbrodetsky)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox_ldap.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox_ldap.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox_ldap.py
new file mode 100644
index 0000000..b6f1b89
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox_ldap.py
@@ -0,0 +1,59 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management.libraries.script.script import Script
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.core.resources.system import File
+from ambari_commons import OSConst
+from resource_management.core.source import InlineTemplate
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+def _ldap_common():
+    import params
+
+    File(os.path.join(params.knox_conf_dir, 'ldap-log4j.properties'),
+         mode=params.mode,
+         group=params.knox_group,
+         owner=params.knox_user,
+         content=InlineTemplate(params.ldap_log4j)
+    )
+
+    File(os.path.join(params.knox_conf_dir, 'users.ldif'),
+         mode=params.mode,
+         group=params.knox_group,
+         owner=params.knox_user,
+         content=params.users_ldif
+    )
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def ldap():
+  import params
+
+  # Manually overriding service logon user & password set by the installation package
+  ServiceConfig(params.knox_ldap_win_service_name,
+                action="change_user",
+                username = params.knox_user,
+                password = Script.get_password(params.knox_user))
+
+  _ldap_common()
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def ldap():
+  _ldap_common()

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params.py
new file mode 100644
index 0000000..ad1a1dc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+
+retryAble = default("/commandParams/command_retry_enabled", False)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params_linux.py
new file mode 100644
index 0000000..4558069
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params_linux.py
@@ -0,0 +1,457 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import status_params
+
+from resource_management.core.logger import Logger
+
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.get_port_from_url import get_port_from_url
+from resource_management.libraries.functions.get_stack_version import get_stack_version
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+from status_params import *
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import stack_select, conf_select
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions import is_empty
+from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
+
+# server configurations
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+
+tmp_dir = Script.get_tmp_dir()
+stack_name = status_params.stack_name
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+version = default("/commandParams/version", None)
+# E.g., 2.3.2.0
+version_formatted = format_stack_version(version)
+
+# E.g., 2.3
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
+stack_supports_core_site_for_ranger_plugin = check_stack_feature(StackFeature.CORE_SITE_FOR_RANGER_PLUGINS_SUPPORT, version_for_stack_feature_checks)
+
+# This is the version whose state is CURRENT. During an RU, this is the source version.
+# DO NOT format it since we need the build number too.
+upgrade_from_version = default("/hostLevelParams/current_version", None)
+
+# server configurations
+# Default value used in HDP 2.3.0.0 and earlier.
+knox_data_dir = '/var/lib/knox/data'
+
+# Important, it has to be strictly greater than 2.3.0.0!!!
+Logger.info(format("Stack version to use is {version_formatted}"))
+if version_formatted and check_stack_feature(StackFeature.KNOX_VERSIONED_DATA_DIR, version_formatted):
+  # This is the current version. In the case of a Rolling Upgrade, it will be the newer version.
+  # In the case of a Downgrade, it will be the version downgrading to.
+  # This is always going to be a symlink to /var/lib/knox/data_${version}
+  knox_data_dir = format('{stack_root}/{version}/knox/data')
+  Logger.info(format("Detected stack with version {version}, will use knox_data_dir = {knox_data_dir}"))
+
+
+knox_master_secret_path = format('{knox_data_dir}/security/master')
+knox_cert_store_path = format('{knox_data_dir}/security/keystores/gateway.jks')
+knox_user = default("/configurations/knox-env/knox_user", "knox")
+
+# server configurations
+# knox_data_dir was already resolved above (versioned when the stack supports it), so it is not reset here
+knox_logs_dir = '/var/log/knox'
+
+# default parameters
+knox_bin = '/usr/bin/gateway'
+knox_conf_dir = '/etc/knox/conf'
+ldap_bin = '/usr/lib/knox/bin/ldap.sh'
+knox_client_bin = '/usr/lib/knox/bin/knoxcli.sh'
+
+# HDP 2.2+ parameters
+if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+  knox_bin = format('{stack_root}/current/knox-server/bin/gateway.sh')
+  knox_conf_dir = format('{stack_root}/current/knox-server/conf')
+  ldap_bin = format('{stack_root}/current/knox-server/bin/ldap.sh')
+  knox_client_bin = format('{stack_root}/current/knox-server/bin/knoxcli.sh')
+  knox_master_secret_path = format('{stack_root}/current/knox-server/data/security/master')
+  knox_cert_store_path = format('{stack_root}/current/knox-server/data/security/keystores/gateway.jks')
+  knox_data_dir = format('{stack_root}/current/knox-server/data/')
+
+knox_group = default("/configurations/knox-env/knox_group", "knox")
+mode = 0644
+
+dfs_ha_enabled = False
+dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+if dfs_ha_nameservices is None:
+  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+
+namenode_rpc = None
+
+if dfs_ha_namenode_ids:
+  dfs_ha_namenodes_ids_list = dfs_ha_namenode_ids.split(",")
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenodes_ids_list)
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namenodes_ids_list:
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    if hostname.lower() in nn_host.lower():
+      namenode_id = nn_id
+      namenode_rpc = nn_host
+    # With HA enabled namenode_address is recomputed
+  namenode_address = format('hdfs://{dfs_ha_nameservices}')
+
+namenode_port_map = {}
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namenodes_ids_list:
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.http-address.{dfs_ha_nameservices}.{nn_id}')]
+    nn_host_parts = nn_host.split(':')
+    namenode_port_map[nn_host_parts[0]] = nn_host_parts[1]
+
+
+namenode_hosts = default("/clusterHostInfo/namenode_host", None)
+if type(namenode_hosts) is list:
+  namenode_host = namenode_hosts[0]
+else:
+  namenode_host = namenode_hosts
+
+has_namenode = namenode_host is not None
+namenode_http_port = "50070"
+namenode_rpc_port = "8020"
+
+if has_namenode:
+  if 'dfs.namenode.http-address' in config['configurations']['hdfs-site']:
+    namenode_http_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.http-address'])
+  if dfs_ha_enabled and namenode_rpc:
+    namenode_rpc_port = get_port_from_url(namenode_rpc)
+  else:
+    if 'dfs.namenode.rpc-address' in config['configurations']['hdfs-site']:
+      namenode_rpc_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.rpc-address'])
+
+webhdfs_service_urls = ""
+
+def buildUrlElement(protocol, hdfs_host, port, servicePath):
+  openTag = "<url>"
+  closeTag = "</url>"
+  proto = protocol + "://"
+  newLine = "\n"
+  if hdfs_host is None or port is None:
+    return ""
+  else:
+    return openTag + proto + hdfs_host + ":" + port + servicePath + closeTag + newLine
+
+namenode_host_keys = namenode_port_map.keys()
+if len(namenode_host_keys) > 0:
+  for host in namenode_host_keys:
+    webhdfs_service_urls += buildUrlElement("http", host, namenode_port_map[host], "/webhdfs")
+else:
+  webhdfs_service_urls = buildUrlElement("http", namenode_host, namenode_http_port, "/webhdfs")
+
+
+rm_hosts = default("/clusterHostInfo/rm_host", None)
+if type(rm_hosts) is list:
+  rm_host = rm_hosts[0]
+else:
+  rm_host = rm_hosts
+has_rm = rm_host is not None
+
+jt_rpc_port = "8050"
+rm_port = "8080"
+
+if has_rm:
+  if 'yarn.resourcemanager.address' in config['configurations']['yarn-site']:
+    jt_rpc_port = get_port_from_url(config['configurations']['yarn-site']['yarn.resourcemanager.address'])
+
+  if 'yarn.resourcemanager.webapp.address' in config['configurations']['yarn-site']:
+    rm_port = get_port_from_url(config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'])
+
+hive_http_port = default('/configurations/hive-site/hive.server2.thrift.http.port', "10001")
+hive_http_path = default('/configurations/hive-site/hive.server2.thrift.http.path', "cliservice")
+hive_server_hosts = default("/clusterHostInfo/hive_server_host", None)
+if type(hive_server_hosts) is list:
+  hive_server_host = hive_server_hosts[0]
+else:
+  hive_server_host = hive_server_hosts
+
+templeton_port = default('/configurations/webhcat-site/templeton.port', "50111")
+webhcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", None)
+if type(webhcat_server_hosts) is list:
+  webhcat_server_host = webhcat_server_hosts[0]
+else:
+  webhcat_server_host = webhcat_server_hosts
+
+hbase_master_port = default('/configurations/hbase-site/hbase.rest.port', "8080")
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", None)
+if type(hbase_master_hosts) is list:
+  hbase_master_host = hbase_master_hosts[0]
+else:
+  hbase_master_host = hbase_master_hosts
+
+oozie_server_hosts = default("/clusterHostInfo/oozie_server", None)
+if type(oozie_server_hosts) is list:
+  oozie_server_host = oozie_server_hosts[0]
+else:
+  oozie_server_host = oozie_server_hosts
+
+has_oozie = oozie_server_host is not None
+oozie_server_port = "11000"
+
+if has_oozie:
+  oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
+
+# Knox managed properties
+knox_managed_pid_symlink = format('{stack_root}/current/knox-server/pids')
+
+#knox log4j
+knox_gateway_log_maxfilesize = default('/configurations/gateway-log4j/knox_gateway_log_maxfilesize',256)
+knox_gateway_log_maxbackupindex = default('/configurations/gateway-log4j/knox_gateway_log_maxbackupindex',20)
+knox_ldap_log_maxfilesize = default('/configurations/ldap-log4j/knox_ldap_log_maxfilesize',256)
+knox_ldap_log_maxbackupindex = default('/configurations/ldap-log4j/knox_ldap_log_maxbackupindex',20)
+
+# server configurations
+knox_master_secret = config['configurations']['knox-env']['knox_master_secret']
+knox_host_name = config['clusterHostInfo']['knox_gateway_hosts'][0]
+knox_host_name_in_cluster = config['hostname']
+knox_host_port = config['configurations']['gateway-site']['gateway.port']
+topology_template = config['configurations']['topology']['content']
+admin_topology_template = default('/configurations/admin-topology/content', None)
+knoxsso_topology_template = config['configurations']['knoxsso-topology']['content']
+gateway_log4j = config['configurations']['gateway-log4j']['content']
+ldap_log4j = config['configurations']['ldap-log4j']['content']
+users_ldif = config['configurations']['users-ldif']['content']
+java_home = config['hostLevelParams']['java_home']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+if security_enabled:
+  knox_keytab_path = config['configurations']['knox-env']['knox_keytab_path']
+  _hostname_lowercase = config['hostname'].lower()
+  knox_principal_name = config['configurations']['knox-env']['knox_principal_name'].replace('_HOST',_hostname_lowercase)
+
+# for curl command in ranger plugin to get db connector
+jdk_location = config['hostLevelParams']['jdk_location']
+
+# ranger knox plugin start section
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+
+# ranger support xml_configuration flag, instead of depending on ranger xml_configurations_supported/ranger-env, using stack feature
+xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
+
+# ambari-server hostname
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+# ranger knox plugin enabled property
+enable_ranger_knox = default("/configurations/ranger-knox-plugin-properties/ranger-knox-plugin-enabled", "No")
+enable_ranger_knox = True if enable_ranger_knox.lower() == 'yes' else False
+
+# get ranger knox properties if enable_ranger_knox is True
+if enable_ranger_knox:
+  # get ranger policy url
+  policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+  if xml_configurations_supported:
+    policymgr_mgr_url = config['configurations']['ranger-knox-security']['ranger.plugin.knox.policy.rest.url']
+
+  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
+    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+
+  # ranger audit db user
+  xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+
+  # ranger knox service/repository name
+  repo_name = str(config['clusterName']) + '_knox'
+  repo_name_value = config['configurations']['ranger-knox-security']['ranger.plugin.knox.service.name']
+  if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+    repo_name = repo_name_value
+
+  knox_home = config['configurations']['ranger-knox-plugin-properties']['KNOX_HOME']
+  common_name_for_certificate = config['configurations']['ranger-knox-plugin-properties']['common.name.for.certificate']
+  repo_config_username = config['configurations']['ranger-knox-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+
+  # ranger-env config
+  ranger_env = config['configurations']['ranger-env']
+
+  # create ranger-env config having external ranger credential properties
+  if not has_ranger_admin and enable_ranger_knox:
+    external_admin_username = default('/configurations/ranger-knox-plugin-properties/external_admin_username', 'admin')
+    external_admin_password = default('/configurations/ranger-knox-plugin-properties/external_admin_password', 'admin')
+    external_ranger_admin_username = default('/configurations/ranger-knox-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
+    external_ranger_admin_password = default('/configurations/ranger-knox-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
+    ranger_env = {}
+    ranger_env['admin_username'] = external_admin_username
+    ranger_env['admin_password'] = external_admin_password
+    ranger_env['ranger_admin_username'] = external_ranger_admin_username
+    ranger_env['ranger_admin_password'] = external_ranger_admin_password
+
+  ranger_plugin_properties = config['configurations']['ranger-knox-plugin-properties']
+  policy_user = config['configurations']['ranger-knox-plugin-properties']['policy_user']
+  repo_config_password = config['configurations']['ranger-knox-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
+
+  xa_audit_db_password = ''
+  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
+    xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
+
+  downloaded_custom_connector = None
+  previous_jdbc_jar_name = None
+  driver_curl_source = None
+  driver_curl_target = None
+  previous_jdbc_jar = None
+
+  if has_ranger_admin and stack_supports_ranger_audit_db:
+    xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
+    jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
+
+    downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_target = format("{stack_root}/current/knox-server/ext/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    previous_jdbc_jar = format("{stack_root}/current/knox-server/ext/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    sql_connector_jar = ''
+
+  knox_ranger_plugin_config = {
+    'username': repo_config_username,
+    'password': repo_config_password,
+    'knox.url': format("https://{knox_host_name}:{knox_host_port}/gateway/admin/api/v1/topologies"),
+    'commonNameForCertificate': common_name_for_certificate
+  }
+
+  knox_ranger_plugin_repo = {
+    'isActive': 'true',
+    'config': json.dumps(knox_ranger_plugin_config),
+    'description': 'knox repo',
+    'name': repo_name,
+    'repositoryType': 'knox',
+    'assetType': '5',
+  }
+
+  custom_ranger_service_config = generate_ranger_service_config(ranger_plugin_properties)
+  if len(custom_ranger_service_config) > 0:
+    knox_ranger_plugin_config.update(custom_ranger_service_config)
+
+  if stack_supports_ranger_kerberos and security_enabled:
+    knox_ranger_plugin_config['policy.download.auth.users'] = knox_user
+    knox_ranger_plugin_config['tag.download.auth.users'] = knox_user
+
+  if stack_supports_ranger_kerberos:
+    knox_ranger_plugin_config['ambari.service.check.user'] = policy_user
+
+    knox_ranger_plugin_repo = {
+      'isEnabled': 'true',
+      'configs': knox_ranger_plugin_config,
+      'description': 'knox repo',
+      'name': repo_name,
+      'type': 'knox'
+    }
+
+  xa_audit_db_is_enabled = False
+  if xml_configurations_supported and stack_supports_ranger_audit_db:
+    xa_audit_db_is_enabled = config['configurations']['ranger-knox-audit']['xasecure.audit.destination.db']
+
+  xa_audit_hdfs_is_enabled = config['configurations']['ranger-knox-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else False
+  ssl_keystore_password = config['configurations']['ranger-knox-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
+  ssl_truststore_password = config['configurations']['ranger-knox-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
+
+  # for SQLA explicitly disable audit to DB for Ranger
+  if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor == 'sqla':
+    xa_audit_db_is_enabled = False
+
+# need this to capture cluster name from where ranger knox plugin is enabled
+cluster_name = config['clusterName']
+
+# ranger knox plugin end section
+
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] if has_namenode else None
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] if has_namenode else None
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
+hdfs_site = config['configurations']['hdfs-site'] if has_namenode else None
+default_fs = config['configurations']['core-site']['fs.defaultFS'] if has_namenode else None
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin") if has_namenode else None
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir() if has_namenode else None
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources()
+)
+
+druid_coordinator_urls = ""
+if "druid-coordinator" in config['configurations']:
+  port = config['configurations']['druid-coordinator']['druid.port']
+  for host in config['clusterHostInfo']['druid_coordinator_hosts']:
+    druid_coordinator_urls += buildUrlElement("http", host, port, "")
+
+druid_overlord_urls = ""
+if "druid-overlord" in config['configurations']:
+  port = config['configurations']['druid-overlord']['druid.port']
+  for host in config['clusterHostInfo']['druid_overlord_hosts']:
+    druid_overlord_urls += buildUrlElement("http", host, port, "")
+
+druid_broker_urls = ""
+if "druid-broker" in config['configurations']:
+  port = config['configurations']['druid-broker']['druid.port']
+  for host in config['clusterHostInfo']['druid_broker_hosts']:
+    druid_broker_urls += buildUrlElement("http", host, port, "")
+
+druid_router_urls = ""
+if "druid-router" in config['configurations']:
+  port = config['configurations']['druid-router']['druid.port']
+  for host in config['clusterHostInfo']['druid_router_hosts']:
+    druid_router_urls += buildUrlElement("http", host, port, "")
+
+zeppelin_ui_urls = ""
+zeppelin_ws_urls = ""
+websocket_support = "false"
+if "zeppelin-config" in config['configurations']:
+  port = config['configurations']['zeppelin-config']['zeppelin.server.port']
+  protocol = "https" if config['configurations']['zeppelin-config']['zeppelin.ssl'] else "http"
+  host = config['clusterHostInfo']['zeppelin_master_hosts'][0]
+  zeppelin_ui_urls += buildUrlElement(protocol, host, port, "")
+  zeppelin_ws_urls += buildUrlElement("ws", host, port, "/ws")
+  websocket_support = "true"

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params_windows.py
new file mode 100644
index 0000000..631146d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/params_windows.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.script import Script
+import os
+from status_params import *
+
+# server configurations
+config = Script.get_config()
+
+stack_root = None
+knox_home = None
+knox_conf_dir = None
+knox_logs_dir = None
+knox_bin = None
+ldap_bin = None
+knox_client_bin = None
+knox_data_dir = None
+
+knox_master_secret_path = None
+knox_cert_store_path = None
+
+try:
+  stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+  knox_home = os.environ['KNOX_HOME']
+  knox_conf_dir = os.environ['KNOX_CONF_DIR']
+  knox_logs_dir = os.environ['KNOX_LOG_DIR']
+  knox_bin = os.path.join(knox_home, 'bin', 'gateway.exe')
+  ldap_bin = os.path.join(knox_home, 'bin', 'ldap.exe')
+  knox_client_bin = os.path.join(knox_home, 'bin', 'knoxcli.cmd')
+  knox_data_dir = os.path.join(knox_home, 'data')
+
+  knox_master_secret_path = os.path.join(knox_data_dir, 'security', 'master')
+  knox_cert_store_path = os.path.join(knox_data_dir, 'security', 'keystores', 'gateway.jks')
+except KeyError:
+  # Knox environment variables are not set on hosts without Knox; keep the None defaults
+  pass
+
+knox_host_port = config['configurations']['gateway-site']['gateway.port']
+knox_host_name = config['clusterHostInfo']['knox_gateway_hosts'][0]
+knox_host_name_in_cluster = config['hostname']
+knox_master_secret = config['configurations']['knox-env']['knox_master_secret']
+topology_template = config['configurations']['topology']['content']
+admin_topology_template = default('/configurations/admin-topology/content', None)
+knoxsso_topology_template = config['configurations']['knoxsso-topology']['content']
+gateway_log4j = config['configurations']['gateway-log4j']['content']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+ldap_log4j = config['configurations']['ldap-log4j']['content']
+users_ldif = config['configurations']['users-ldif']['content']
+
+hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
+knox_user = hadoop_user
+hdfs_user = hadoop_user
+knox_group = None
+mode = None

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/service_check.py
new file mode 100644
index 0000000..a2134d6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/service_check.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.core.resources.system import Execute, File
+from resource_management.libraries.functions.format import format
+from resource_management.core.source import StaticFile
+import sys
+import os
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+
+class KnoxServiceCheck(Script):
+  def service_check(self, env):
+    pass
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class KnoxServiceCheckWindows(KnoxServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    temp_dir = os.path.join(os.path.dirname(params.knox_home), "temp")
+    validateKnoxFileName = "validateKnoxStatus.py"
+    validateKnoxFilePath = os.path.join(temp_dir, validateKnoxFileName)
+    python_executable = sys.executable
+    validateStatusCmd = "%s %s -p %s -n %s" % (python_executable, validateKnoxFilePath, params.knox_host_port, params.knox_host_name)
+
+    print "Test connectivity to knox server"
+
+    File(validateKnoxFilePath,
+         content=StaticFile(validateKnoxFileName)
+    )
+
+    Execute(validateStatusCmd,
+            tries=3,
+            try_sleep=5,
+            timeout=5,
+            logoutput=True
+    )
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class KnoxServiceCheckDefault(KnoxServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    validateKnoxFileName = "validateKnoxStatus.py"
+    validateKnoxFilePath = format("{tmp_dir}/{validateKnoxFileName}")
+    python_executable = sys.executable
+    validateStatusCmd = format("{python_executable} {validateKnoxFilePath} -p {knox_host_port} -n {knox_host_name}")
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
+      smoke_cmd = format("{kinit_cmd} {validateStatusCmd}")
+    else:
+      smoke_cmd = validateStatusCmd
+
+    print "Test connectivity to knox server"
+
+    File(validateKnoxFilePath,
+         content=StaticFile(validateKnoxFileName),
+         mode=0755
+    )
+
+    Execute(smoke_cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            user=params.smokeuser,
+            timeout=5,
+            logoutput=True
+    )
+
+
+if __name__ == "__main__":
+    KnoxServiceCheck().execute()
\ No newline at end of file
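
Execute(tries=3, try_sleep=5, timeout=5) above retries the connectivity probe before failing the check. As a plain-Python approximation of that retry loop (a hedged sketch, not resource_management's actual implementation):

    # hedged sketch of retry-with-sleep, approximating Execute(tries=..., try_sleep=...)
    import subprocess
    import time

    def run_with_retries(cmd, tries=3, try_sleep=5):
      for attempt in range(1, tries + 1):
        if subprocess.call(cmd, shell=True) == 0:
          return
        if attempt < tries:
          time.sleep(try_sleep)
      raise RuntimeError("command failed after %d tries: %s" % (tries, cmd))

    run_with_retries("exit 0")  # stands in for the validateKnoxStatus.py invocation above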

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/setup_ranger_knox.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/setup_ranger_knox.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/setup_ranger_knox.py
new file mode 100644
index 0000000..c486ef7
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/setup_ranger_knox.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_core_site_for_required_plugins
+from resource_management.core.resources import File
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.format import format
+
+def setup_ranger_knox(upgrade_type=None):
+  import params
+
+  if params.enable_ranger_knox:
+
+    stack_version = None
+    if upgrade_type is not None:
+      stack_version = params.version
+
+    if params.retryAble:
+      Logger.info("Knox: Setup ranger: command retry enables thus retrying if ranger admin is down !")
+    else:
+      Logger.info("Knox: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
+
+    if params.xml_configurations_supported and params.enable_ranger_knox and params.xa_audit_hdfs_is_enabled:
+      if params.has_namenode:
+        params.HdfsResource("/ranger/audit",
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.hdfs_user,
+                           group=params.hdfs_user,
+                           mode=0755,
+                           recursive_chmod=True
+        )
+        params.HdfsResource("/ranger/audit/knox",
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.knox_user,
+                           group=params.knox_user,
+                           mode=0700,
+                           recursive_chmod=True
+        )
+        params.HdfsResource(None, action="execute")
+
+        if params.namenode_hosts is not None and len(params.namenode_hosts) > 1:
+          Logger.info('Ranger Knox plugin is enabled in NameNode HA environment along with audit to Hdfs enabled, creating hdfs-site.xml')
+          XmlConfig("hdfs-site.xml",
+            conf_dir=params.knox_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner=params.knox_user,
+            group=params.knox_group,
+            mode=0644
+          )
+        else:
+          File(format('{knox_conf_dir}/hdfs-site.xml'), action="delete")
+
+    if params.xml_configurations_supported:
+      api_version=None
+      if params.stack_supports_ranger_kerberos:
+        api_version='v2'
+      from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
+      setup_ranger_plugin('knox-server', 'knox', params.previous_jdbc_jar,
+                          params.downloaded_custom_connector, params.driver_curl_source,
+                          params.driver_curl_target, params.java_home,
+                          params.repo_name, params.knox_ranger_plugin_repo,
+                          params.ranger_env, params.ranger_plugin_properties,
+                          params.policy_user, params.policymgr_mgr_url,
+                          params.enable_ranger_knox, conf_dict=params.knox_conf_dir,
+                          component_user=params.knox_user, component_group=params.knox_group, cache_service_list=['knox'],
+                          plugin_audit_properties=params.config['configurations']['ranger-knox-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-knox-audit'],
+                          plugin_security_properties=params.config['configurations']['ranger-knox-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-knox-security'],
+                          plugin_policymgr_ssl_properties=params.config['configurations']['ranger-knox-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-knox-policymgr-ssl'],
+                          component_list=['knox-server'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                          credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
+                          ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                          stack_version_override=stack_version, skip_if_rangeradmin_down=not params.retryAble, api_version=api_version,
+                          is_security_enabled = params.security_enabled,
+                          is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos,
+                          component_user_principal=params.knox_principal_name if params.security_enabled else None,
+                          component_user_keytab=params.knox_keytab_path if params.security_enabled else None)
+    else:
+      from resource_management.libraries.functions.setup_ranger_plugin import setup_ranger_plugin
+      setup_ranger_plugin('knox-server', 'knox', params.previous_jdbc_jar,
+                        params.downloaded_custom_connector, params.driver_curl_source,
+                        params.driver_curl_target, params.java_home,
+                        params.repo_name, params.knox_ranger_plugin_repo,
+                        params.ranger_env, params.ranger_plugin_properties,
+                        params.policy_user, params.policymgr_mgr_url,
+                        params.enable_ranger_knox, conf_dict=params.knox_conf_dir,
+                        component_user=params.knox_user, component_group=params.knox_group, cache_service_list=['knox'],
+                        plugin_audit_properties=params.config['configurations']['ranger-knox-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-knox-audit'],
+                        plugin_security_properties=params.config['configurations']['ranger-knox-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-knox-security'],
+                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-knox-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-knox-policymgr-ssl'],
+                        component_list=['knox-server'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
+                        credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
+                        ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
+                        stack_version_override=stack_version, skip_if_rangeradmin_down=not params.retryAble)
+    if params.stack_supports_core_site_for_ranger_plugin and params.enable_ranger_knox and params.has_namenode and params.security_enabled:
+      Logger.info("Stack supports core-site.xml creation for Ranger plugin, creating core-site.xml from namenode configuraitions")
+      setup_core_site_for_required_plugins(component_user=params.knox_user, component_group=params.knox_group,create_core_site_path = params.knox_conf_dir, config = params.config)
+    else:
+      Logger.info("Stack does not support core-site.xml creation for Ranger plugin, skipping core-site.xml configurations")
+
+  else:
+    Logger.info('Ranger Knox plugin is not enabled')
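
A note on the HdfsResource pattern used above: calls with action="create_on_execute" only queue directory operations, and the trailing HdfsResource(None, action="execute") flushes the whole queue in a single pass. A minimal sketch of the pattern, using a hypothetical path:

    # Illustrative only: the queue-then-flush pattern used by the audit setup above.
    # params.HdfsResource is typically a functools.partial over HdfsResource,
    # defined in params_linux.py (assumed here, as in other Ambari services).
    params.HdfsResource("/ranger/audit/example",     # hypothetical directory
                        type="directory",
                        action="create_on_execute",  # queued, not yet applied
                        owner=params.knox_user,
                        mode=0700)
    params.HdfsResource(None, action="execute")      # apply every queued operation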

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/status_params.py
new file mode 100644
index 0000000..3cbd920
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/status_params.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+
+
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+if OSCheck.is_windows_family():
+  knox_gateway_win_service_name = "gateway"
+  knox_ldap_win_service_name = "ldap"
+else:
+  knox_conf_dir = '/etc/knox/conf'
+  if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+    knox_conf_dir = format('{stack_root}/current/knox-server/conf')
+  knox_pid_dir = config['configurations']['knox-env']['knox_pid_dir']
+  knox_pid_file = format("{knox_pid_dir}/gateway.pid")
+  ldap_pid_file = format("{knox_pid_dir}/ldap.pid")
+
+  security_enabled = config['configurations']['cluster-env']['security_enabled']
+  if security_enabled:
+      knox_keytab_path = config['configurations']['knox-env']['knox_keytab_path']
+      knox_principal_name = config['configurations']['knox-env']['knox_principal_name']
+  else:
+      knox_keytab_path = None
+      knox_principal_name = None
+
+  hostname = config['hostname'].lower()
+  knox_user = default("/configurations/knox-env/knox_user", "knox")
+  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+  temp_dir = Script.get_tmp_dir()
+  
+stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file
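
For context, these status parameters are typically consumed by a component's status() method; a minimal sketch of the assumed consumer follows (knox_gateway.py in this patch does the equivalent inside its Script subclass):

    # Sketch of an assumed consumer of status_params, not part of this diff.
    # check_process_status raises ComponentIsNotRunning when the pid file is
    # missing or points at a dead process.
    from resource_management.libraries.functions.check_process_status import check_process_status
    import status_params

    def status(env):
        env.set_params(status_params)
        check_process_status(status_params.knox_pid_file)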

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/upgrade.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/upgrade.py
new file mode 100644
index 0000000..917f340
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/upgrade.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import tempfile
+
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions import tar_archive
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.script.script import Script
+
+
+BACKUP_TEMP_DIR = "knox-upgrade-backup"
+BACKUP_DATA_ARCHIVE = "knox-data-backup.tar"
+STACK_ROOT_DEFAULT = Script.get_stack_root()
+
+def backup_data():
+  """
+  Backs up the knox data as part of the upgrade process.
+  :return: Returns the path to the absolute backup directory.
+  """
+  Logger.info('Backing up Knox data directory before upgrade...')
+  directoryMappings = _get_directory_mappings_during_upgrade()
+
+  Logger.info("Directory mappings to backup: {0}".format(str(directoryMappings)))
+
+  absolute_backup_dir = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR)
+  if not os.path.isdir(absolute_backup_dir):
+    os.makedirs(absolute_backup_dir)
+
+  for directory in directoryMappings:
+    if not os.path.isdir(directory):
+      raise Fail("Unable to backup missing directory {0}".format(directory))
+
+    archive = os.path.join(absolute_backup_dir, directoryMappings[directory])
+    Logger.info('Compressing {0} to {1}'.format(directory, archive))
+
+    if os.path.exists(archive):
+      os.remove(archive)
+
+    # backup the directory, following symlinks instead of including them
+    tar_archive.archive_directory_dereference(archive, directory)
+
+  return absolute_backup_dir
+
+
+def seed_current_data_directory():
+  """
+  HDP stack example:
+
+  Knox uses "versioned" data directories in some stacks:
+  /usr/hdp/2.2.0.0-1234/knox/data -> /var/lib/knox/data
+  /usr/hdp/2.3.0.0-4567/knox/data -> /var/lib/knox/data-2.3.0.0-4567
+
+  If the stack being upgraded to supports versioned data directories for Knox, then we should
+  seed the data from the prior version. This is mainly because Knox keeps things like keystores
+  in the data directory and if those aren't copied over then it will re-create self-signed
+  versions. This side-effect behavior causes loss of service in clusters where Knox is using
+  custom keystores.
+
+  cp -R -p -f /usr/hdp/<old>/knox-server/data/. /usr/hdp/current/knox-server/data
+  :return:
+  """
+  import params
+
+  if params.version is None or params.upgrade_from_version is None:
+    raise Fail("The source and target versions are required")
+
+  if check_stack_feature(StackFeature.KNOX_VERSIONED_DATA_DIR, params.version):
+    Logger.info("Seeding Knox data from prior version...")
+
+    # <stack-root>/2.3.0.0-1234/knox/data/.
+    source_data_dir = os.path.join(params.stack_root, params.upgrade_from_version, "knox", "data", ".")
+
+    # <stack-root>/current/knox-server/data
+    target_data_dir = os.path.join(params.stack_root, "current", "knox-server", "data")
+
+    # recursive copy, overwriting, and preserving attributes
+    Execute(("cp", "-R", "-p", "-f", source_data_dir, target_data_dir), sudo = True)
+
+
+def _get_directory_mappings_during_upgrade():
+  """
+  Gets a dictionary of directory to archive name that represents the
+  directories that need to be backed up and their output tarball archive targets
+  :return:  the dictionary of directory to tarball mappings
+  """
+  import params
+
+  # the data directory is always a symlink to the "correct" data directory in /var/lib/knox
+  # such as /var/lib/knox/data or /var/lib/knox/data-2.4.0.0-1234
+  knox_data_dir = STACK_ROOT_DEFAULT + '/current/knox-server/data'
+
+  directories = { knox_data_dir: BACKUP_DATA_ARCHIVE }
+
+  Logger.info(format("Knox directories to backup:\n{directories}"))
+  return directories
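
archive_directory_dereference above tars a directory while following symlinks, which matters here because the Knox data directory is itself a symlink into /var/lib/knox. A rough standalone equivalent using only the standard library (illustrative, not the Ambari helper):

    # Illustrative stand-in for tar_archive.archive_directory_dereference.
    import tarfile

    def archive_directory_dereference(archive, directory):
        # dereference=True archives the targets of symlinks instead of the
        # links themselves, so the backup survives the directory re-link.
        with tarfile.open(archive, "w", dereference=True) as tar:
            tar.add(directory, arcname=".")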

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/templates/input.config-knox.json.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/templates/input.config-knox.json.j2 b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/templates/input.config-knox.json.j2
new file mode 100644
index 0000000..6d7cf72
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/templates/input.config-knox.json.j2
@@ -0,0 +1,60 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"knox_gateway",
+      "rowtype":"service",
+      "path":"/var/log/knox/gateway.log"
+    },
+    {
+      "type":"knox_cli",
+      "rowtype":"service",
+      "path":"/var/log/knox/knoxcli.log"
+    },
+    {
+      "type":"knox_ldap",
+      "rowtype":"service",
+      "path":"/var/log/knox/ldap.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "knox_gateway",
+            "knox_cli",
+            "knox_ldap"
+          ]
+        }
+      },
+      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
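
To make the grok mapping above concrete, here is a hypothetical gateway.log line in the declared log4j format and the fields the message_pattern would extract (all values are illustrative):

    # Hypothetical line matching "%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n":
    line = "2017-05-17 13:51:05,123 INFO  hadoop.gateway (GatewayServer.java:startGateway(245)) - Starting gateway..."

    # Fields the message_pattern yields for it (illustrative):
    expected = {
        "logtime": "2017-05-17 13:51:05,123",
        "level": "INFO",
        "logger_name": "hadoop.gateway",
        "file": "GatewayServer.java",
        "method": "startGateway",
        "line_number": "245",
        "log_message": "Starting gateway...",
    }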

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/templates/krb5JAASLogin.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/templates/krb5JAASLogin.conf.j2 b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/templates/krb5JAASLogin.conf.j2
new file mode 100644
index 0000000..fa3237b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/templates/krb5JAASLogin.conf.j2
@@ -0,0 +1,30 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+com.sun.security.jgss.initiate {
+com.sun.security.auth.module.Krb5LoginModule required
+renewTGT=true
+doNotPrompt=true
+useKeyTab=true
+keyTab="{{knox_keytab_path}}"
+principal="{{knox_principal_name}}"
+isInitiator=true
+storeKey=true
+useTicketCache=true
+client=true;
+};
+
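
A minimal sketch of how this template resolves, with hypothetical values for the two variables (Ambari's own jinja2-based Template resource performs the same substitution when writing the file):

    # Illustrative render of the template above; both values are assumptions.
    from jinja2 import Template

    with open("krb5JAASLogin.conf.j2") as f:
        jaas_conf = Template(f.read()).render(
            knox_keytab_path="/etc/security/keytabs/knox.service.keytab",  # assumed
            knox_principal_name="knox/_HOST@EXAMPLE.COM",                  # assumed
        )
    # jaas_conf now holds the com.sun.security.jgss.initiate section with
    # keyTab and principal filled in.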

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/role_command_order.json
new file mode 100644
index 0000000..c0475e4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/role_command_order.json
@@ -0,0 +1,7 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for KNOX",
+    "KNOX_GATEWAY-START" : ["RANGER_USERSYNC-START", "NAMENODE-START"],
+    "KNOX_SERVICE_CHECK-SERVICE_CHECK" : ["KNOX_GATEWAY-START"]
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/stacks/HDP/3.0/services/KNOX/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/KNOX/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/KNOX/metainfo.xml
new file mode 100644
index 0000000..d8054b1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/KNOX/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>KNOX</name>
+            <version>0.5.0.3.0</version>
+            <extends>common-services/KNOX/0.5.0.3.0</extends>
+        </service>
+    </services>
+</metainfo>


[25/25] ambari git commit: Merge remote-tracking branch 'origin/trunk' into branch-feature-AMBARI-14714

Posted by jl...@apache.org.
Merge remote-tracking branch 'origin/trunk' into branch-feature-AMBARI-14714


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/922f3080
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/922f3080
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/922f3080

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 922f3080bb136fd9b1f817d63aa04e590946230f
Parents: e90de83 bba703b
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Wed May 17 13:51:05 2017 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Wed May 17 13:51:05 2017 -0700

----------------------------------------------------------------------
 .../resources/ui/admin-web/app/views/main.html  |  50 +-
 ambari-infra/ambari-infra-assembly/pom.xml      |  91 ++++
 .../src/main/package/deb/manager/control        |  22 +
 .../src/main/package/deb/manager/postinst       |  15 +
 .../src/main/package/deb/manager/postrm         |  15 +
 .../src/main/package/deb/manager/preinst        |  15 +
 .../src/main/package/deb/manager/prerm          |  15 +
 ambari-infra/ambari-infra-manager/README.md     |   6 +
 ambari-infra/ambari-infra-manager/build.xml     |  54 +++
 .../ambari-infra-manager/docker/Dockerfile      |  52 +++
 .../ambari-infra-manager/docker/bin/start.sh    |  21 +
 .../docker/infra-manager-docker.sh              |  85 ++++
 ambari-infra/ambari-infra-manager/pom.xml       |  43 +-
 .../org/apache/ambari/infra/InfraManager.java   |   1 -
 .../conf/batch/InfraManagerBatchConfig.java     |   2 +-
 .../src/main/resources/infra-manager-env.sh     |  18 +
 .../src/main/resources/infraManager.sh          |  20 +
 ambari-logsearch/README.md                      |   1 +
 .../ambari-logsearch-config-api/pom.xml         |   2 +-
 .../config/api/InputConfigMonitor.java          |   4 +-
 .../config/api/LogLevelFilterMonitor.java       |  44 ++
 .../logsearch/config/api/LogSearchConfig.java   |  57 ++-
 .../model/loglevelfilter/LogLevelFilter.java    |  79 ++++
 .../model/loglevelfilter/LogLevelFilterMap.java |  33 ++
 .../config/api/LogSearchConfigClass1.java       |  21 +-
 .../config/api/LogSearchConfigClass2.java       |  21 +-
 .../ambari-logsearch-config-zookeeper/pom.xml   |   4 +
 .../config/zookeeper/LogSearchConfigZK.java     | 191 +++++---
 ambari-logsearch/ambari-logsearch-it/pom.xml    |   6 +
 .../story/LogSearchBackendStories.java          |  19 +-
 .../logsearch/story/LogSearchStoryLocator.java  |  97 ++++
 .../logsearch/story/LogSearchUIStories.java     |  10 +-
 .../org/apache/ambari/logfeeder/LogFeeder.java  |   6 +-
 .../logfeeder/input/InputConfigUploader.java    |   2 +-
 .../logfeeder/logconfig/FilterLogData.java      |  87 ----
 .../logfeeder/logconfig/LogConfigFetcher.java   | 168 -------
 .../logfeeder/logconfig/LogConfigHandler.java   | 213 ---------
 .../logfeeder/logconfig/LogFeederFilter.java    |  90 ----
 .../logconfig/LogFeederFilterWrapper.java       |  55 ---
 .../logfeeder/loglevelfilter/FilterLogData.java |  73 +++
 .../loglevelfilter/LogLevelFilterHandler.java   | 157 +++++++
 .../logfeeder/metrics/LogFeederAMSClient.java   |  12 +-
 .../ambari/logfeeder/output/OutputManager.java  |   2 +-
 .../ambari/logfeeder/util/LogFeederUtil.java    |  19 -
 .../logconfig/LogConfigHandlerTest.java         |  90 ++--
 .../src/test/resources/logfeeder.properties     |   3 +-
 .../configurer/LogfeederFilterConfigurer.java   |  66 ---
 .../ambari/logsearch/dao/UserConfigSolrDao.java |  79 ----
 .../ambari/logsearch/doc/DocConstants.java      |  10 +-
 .../logsearch/manager/ShipperConfigManager.java |  45 +-
 .../logsearch/manager/UserConfigManager.java    |  24 -
 .../model/common/LSServerLogLevelFilter.java    | 100 ++++
 .../model/common/LSServerLogLevelFilterMap.java |  65 +++
 .../model/common/LogFeederDataMap.java          |  50 --
 .../model/common/LogfeederFilterData.java       |  87 ----
 .../logsearch/rest/ShipperConfigResource.java   |  43 +-
 .../logsearch/rest/UserConfigResource.java      |  18 -
 .../webapp/templates/common/Header_tmpl.html    |   5 +-
 ambari-metrics/ambari-metrics-assembly/pom.xml  |  20 +
 .../src/main/assembly/monitor-windows.xml       |   7 +
 .../src/main/assembly/monitor.xml               |   9 +-
 .../timeline/AbstractTimelineMetricsSink.java   |  24 +-
 .../sink/timeline/AggregationResult.java        |  60 +++
 .../metrics2/sink/timeline/MetricAggregate.java | 110 +++++
 .../sink/timeline/MetricClusterAggregate.java   |  73 +++
 .../sink/timeline/MetricHostAggregate.java      |  81 ++++
 .../metrics2/sink/timeline/TimelineMetric.java  |   6 +-
 .../TimelineMetricWithAggregatedValues.java     |  65 +++
 .../AbstractTimelineMetricSinkTest.java         |  10 +
 .../availability/MetricCollectorHATest.java     |  10 +
 .../cache/HandleConnectExceptionTest.java       |  10 +
 .../sink/flume/FlumeTimelineMetricsSink.java    |  16 +
 .../timeline/HadoopTimelineMetricsSink.java     |  20 +-
 .../conf/unix/log4j.properties                  |  31 ++
 .../conf/windows/log4j.properties               |  29 ++
 .../ambari-metrics-host-aggregator/pom.xml      | 120 +++++
 .../AbstractMetricPublisherThread.java          | 134 ++++++
 .../aggregator/AggregatedMetricsPublisher.java  | 101 ++++
 .../host/aggregator/AggregatorApplication.java  | 180 ++++++++
 .../host/aggregator/AggregatorWebService.java   |  56 +++
 .../host/aggregator/RawMetricsPublisher.java    |  60 +++
 .../host/aggregator/TimelineMetricsHolder.java  |  98 ++++
 .../conf/unix/ambari-metrics-monitor            |   2 +-
 .../src/main/python/core/aggregator.py          | 110 +++++
 .../src/main/python/core/config_reader.py       |  35 +-
 .../src/main/python/core/controller.py          |  28 ++
 .../src/main/python/core/emitter.py             |   8 +-
 .../src/main/python/core/stop_handler.py        |   3 +-
 .../src/main/python/main.py                     |   6 +-
 .../kafka/KafkaTimelineMetricsReporter.java     |  17 +
 .../storm/StormTimelineMetricsReporter.java     |  14 +
 .../sink/storm/StormTimelineMetricsSink.java    |  14 +
 .../storm/StormTimelineMetricsReporter.java     |  16 +
 .../sink/storm/StormTimelineMetricsSink.java    |  16 +
 .../timeline/HBaseTimelineMetricStore.java      |  29 +-
 .../metrics/timeline/PhoenixHBaseAccessor.java  |   4 +-
 .../timeline/TimelineMetricConfiguration.java   |   2 +
 .../metrics/timeline/TimelineMetricStore.java   |   2 +
 .../timeline/TimelineMetricsAggregatorSink.java |   4 +-
 .../timeline/aggregators/MetricAggregate.java   | 110 -----
 .../aggregators/MetricClusterAggregate.java     |  73 ---
 .../aggregators/MetricHostAggregate.java        |  81 ----
 .../TimelineMetricAppAggregator.java            |   1 +
 .../TimelineMetricClusterAggregator.java        |   2 +
 .../TimelineMetricClusterAggregatorSecond.java  |   1 +
 .../TimelineMetricHostAggregator.java           |   1 +
 .../aggregators/TimelineMetricReadHelper.java   |   2 +
 .../webapp/TimelineWebServices.java             |  31 ++
 .../timeline/ITPhoenixHBaseAccessor.java        |   4 +-
 .../metrics/timeline/MetricTestHelper.java      |   2 +-
 .../timeline/PhoenixHBaseAccessorTest.java      |   4 +-
 .../timeline/TestMetricHostAggregate.java       |   8 +-
 .../timeline/TestTimelineMetricStore.java       |   6 +
 .../TimelineMetricsAggregatorMemorySink.java    |   4 +-
 .../aggregators/ITClusterAggregator.java        |   4 +-
 .../aggregators/ITMetricAggregator.java         |  13 +-
 ...melineMetricClusterAggregatorSecondTest.java |   1 +
 ambari-metrics/pom.xml                          |   1 +
 .../internal/StageResourceProvider.java         |  81 +---
 .../system/impl/AmbariMetricSinkImpl.java       |  10 +
 .../server/upgrade/UpgradeCatalog251.java       |  47 +-
 .../server/upgrade/UpgradeCatalog300.java       |  15 +
 ambari-server/src/main/python/ambari-server.py  | 299 +++++++-----
 .../main/python/ambari_server/setupMpacks.py    |   7 +-
 .../1.6.1.2.2.0/package/scripts/params.py       |   2 +
 .../hadoop-metrics2-accumulo.properties.j2      |   3 +
 .../0.1.0/configuration/ams-env.xml             |   8 +
 .../0.1.0/configuration/ams-site.xml            |  11 +
 .../AMBARI_METRICS/0.1.0/metainfo.xml           |   3 +
 .../AMBARI_METRICS/0.1.0/package/scripts/ams.py |  30 ++
 .../0.1.0/package/scripts/params.py             |   5 +
 .../hadoop-metrics2-hbase.properties.j2         |   3 +
 .../package/templates/metric_monitor.ini.j2     |   7 +
 .../FLUME/1.4.0.2.0/package/scripts/params.py   |   3 +
 .../templates/flume-metrics2.properties.j2      |   2 +
 .../0.96.0.2.0/package/scripts/params_linux.py  |   3 +
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 |   2 +
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 |   2 +
 .../hadoop-metrics2.properties.xml              |   2 +
 .../0.12.0.2.0/package/scripts/params_linux.py  |   2 +
 .../hadoop-metrics2-hivemetastore.properties.j2 |   2 +
 .../hadoop-metrics2-hiveserver2.properties.j2   |   2 +
 .../templates/hadoop-metrics2-llapdaemon.j2     |   2 +
 .../hadoop-metrics2-llaptaskscheduler.j2        |   2 +
 .../2.1.0.3.0/package/scripts/params_linux.py   |   3 +
 .../hadoop-metrics2-hivemetastore.properties.j2 |   2 +
 .../hadoop-metrics2-hiveserver2.properties.j2   |   2 +
 .../templates/hadoop-metrics2-llapdaemon.j2     |   2 +
 .../hadoop-metrics2-llaptaskscheduler.j2        |   2 +
 .../KAFKA/0.8.1/configuration/kafka-broker.xml  |  11 +
 .../KAFKA/0.8.1/package/scripts/params.py       |   3 +
 .../common-services/KNOX/0.5.0.3.0/alerts.json  |  32 ++
 .../0.5.0.3.0/configuration/admin-topology.xml  |  97 ++++
 .../0.5.0.3.0/configuration/gateway-log4j.xml   | 110 +++++
 .../0.5.0.3.0/configuration/gateway-site.xml    |  71 +++
 .../KNOX/0.5.0.3.0/configuration/knox-env.xml   |  83 ++++
 .../configuration/knoxsso-topology.xml          | 126 +++++
 .../KNOX/0.5.0.3.0/configuration/ldap-log4j.xml |  93 ++++
 .../configuration/ranger-knox-audit.xml         | 132 ++++++
 .../ranger-knox-plugin-properties.xml           | 132 ++++++
 .../configuration/ranger-knox-policymgr-ssl.xml |  66 +++
 .../configuration/ranger-knox-security.xml      |  64 +++
 .../KNOX/0.5.0.3.0/configuration/topology.xml   | 174 +++++++
 .../KNOX/0.5.0.3.0/configuration/users-ldif.xml | 140 ++++++
 .../KNOX/0.5.0.3.0/kerberos.json                |  81 ++++
 .../common-services/KNOX/0.5.0.3.0/metainfo.xml | 109 +++++
 .../package/files/validateKnoxStatus.py         |  43 ++
 .../KNOX/0.5.0.3.0/package/scripts/knox.py      | 192 ++++++++
 .../0.5.0.3.0/package/scripts/knox_gateway.py   | 220 +++++++++
 .../KNOX/0.5.0.3.0/package/scripts/knox_ldap.py |  59 +++
 .../KNOX/0.5.0.3.0/package/scripts/params.py    |  29 ++
 .../0.5.0.3.0/package/scripts/params_linux.py   | 457 +++++++++++++++++++
 .../0.5.0.3.0/package/scripts/params_windows.py |  71 +++
 .../0.5.0.3.0/package/scripts/service_check.py  |  96 ++++
 .../package/scripts/setup_ranger_knox.py        | 121 +++++
 .../0.5.0.3.0/package/scripts/status_params.py  |  59 +++
 .../KNOX/0.5.0.3.0/package/scripts/upgrade.py   | 118 +++++
 .../package/templates/input.config-knox.json.j2 |  60 +++
 .../package/templates/krb5JAASLogin.conf.j2     |  30 ++
 .../KNOX/0.5.0.3.0/role_command_order.json      |   7 +
 .../KNOX/0.5.0.3.0/service_advisor.py           | 253 ++++++++++
 .../configuration/logfeeder-properties.xml      |  10 +
 .../configuration/logsearch-properties.xml      |  10 -
 .../LOGSEARCH/0.5.0/themes/theme.json           |   4 +-
 .../scripts/alerts/alert_spark_livy_port.py     |   8 +-
 .../SPARK/1.2.1/package/scripts/params.py       |   1 +
 .../1.2.1/package/scripts/service_check.py      |   2 +-
 .../scripts/alerts/alert_spark2_livy_port.py    |   8 +-
 .../SPARK2/2.0.0/package/scripts/params.py      |   1 +
 .../2.0.0/package/scripts/service_check.py      |   2 +-
 .../sqoop-atlas-application.properties.xml      |  47 ++
 .../SQOOP/1.4.4.3.0/configuration/sqoop-env.xml |  87 ++++
 .../1.4.4.3.0/configuration/sqoop-site.xml      |  38 ++
 .../SQOOP/1.4.4.3.0/kerberos.json               |  20 +
 .../SQOOP/1.4.4.3.0/metainfo.xml                | 115 +++++
 .../SQOOP/1.4.4.3.0/package/scripts/__init__.py |  19 +
 .../SQOOP/1.4.4.3.0/package/scripts/params.py   |  27 ++
 .../1.4.4.3.0/package/scripts/params_linux.py   | 135 ++++++
 .../1.4.4.3.0/package/scripts/params_windows.py |  30 ++
 .../1.4.4.3.0/package/scripts/service_check.py  |  62 +++
 .../SQOOP/1.4.4.3.0/package/scripts/sqoop.py    | 124 +++++
 .../1.4.4.3.0/package/scripts/sqoop_client.py   |  66 +++
 .../SQOOP/1.4.4.3.0/role_command_order.json     |   6 +
 .../SQOOP/1.4.4.3.0/service_advisor.py          | 197 ++++++++
 .../STORM/0.9.1/package/scripts/params_linux.py |   2 +
 .../0.9.1/package/templates/config.yaml.j2      |   2 +
 .../templates/storm-metrics2.properties.j2      |   2 +
 .../custom_actions/scripts/ru_execute_tasks.py  |   2 +
 .../2.0.6/hooks/before-START/scripts/params.py  |   3 +
 .../templates/hadoop-metrics2.properties.j2     |   2 +
 .../stacks/HDP/2.3/services/stack_advisor.py    |   2 +-
 .../hadoop-metrics2.properties.xml              |   2 +
 .../3.0/hooks/before-START/scripts/params.py    |   2 +
 .../templates/hadoop-metrics2.properties.j2     |   2 +
 .../stacks/HDP/3.0/services/KNOX/metainfo.xml   |  27 ++
 .../stacks/HDP/3.0/services/SQOOP/metainfo.xml  |  27 ++
 .../system/impl/TestAmbariMetricsSinkImpl.java  |  10 +
 .../server/upgrade/UpgradeCatalog251Test.java   |  92 ++++
 .../server/upgrade/UpgradeCatalog300Test.java   |  29 ++
 .../src/test/python/TestAmbariServer.py         | 409 +++++++++--------
 ambari-web/app/app.js                           |   7 +
 .../main/admin/stack_and_upgrade_controller.js  |   9 +-
 ambari-web/app/controllers/wizard.js            |   6 +-
 .../app/controllers/wizard/step7_controller.js  |   1 +
 .../app/controllers/wizard/step8_controller.js  | 167 ++++++-
 ambari-web/app/messages.js                      |   3 +-
 .../app/mixins/common/configs/configs_saver.js  |  32 +-
 ambari-web/app/templates/wizard/step8.hbs       |   5 +
 ambari-web/app/utils/ajax/ajax.js               |   4 +-
 .../admin/stack_and_upgrade_controller_test.js  |   6 +
 .../test/controllers/wizard/step8_test.js       | 132 ++++--
 .../mixins/common/configs/configs_saver_test.js |  13 +
 .../2.0/hooks/before-START/scripts/params.py    |   2 +
 233 files changed, 8682 insertions(+), 1916 deletions(-)
----------------------------------------------------------------------



[13/25] ambari git commit: AMBARI-20881 Add Log Level Filter to the Log Search config API (mgergely)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/LogfeederFilterConfigurer.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/LogfeederFilterConfigurer.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/LogfeederFilterConfigurer.java
deleted file mode 100644
index c2d27f9..0000000
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/configurer/LogfeederFilterConfigurer.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.ambari.logsearch.configurer;
-
-import org.apache.ambari.logsearch.conf.SolrPropsConfig;
-import org.apache.ambari.logsearch.conf.global.SolrCollectionState;
-import org.apache.ambari.logsearch.dao.UserConfigSolrDao;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class LogfeederFilterConfigurer implements Configurer {
-
-  private static final Logger LOG = LoggerFactory.getLogger(LogfeederFilterConfigurer.class);
-
-  private static final int SETUP_RETRY_SECOND = 10;
-
-  private final UserConfigSolrDao userConfigSolrDao;
-
-  public LogfeederFilterConfigurer(final UserConfigSolrDao userConfigSolrDao) {
-    this.userConfigSolrDao = userConfigSolrDao;
-  }
-
-  @Override
-  public void start() {
-    final SolrPropsConfig solrPropsConfig = userConfigSolrDao.getSolrPropsConfig();
-    final SolrCollectionState state = userConfigSolrDao.getSolrCollectionState();
-    Thread setupFiltersThread = new Thread("logfeeder_filter_setup") {
-      @Override
-      public void run() {
-        LOG.info("logfeeder_filter_setup thread started (to upload logfeeder config)");
-        while (true) {
-          int retryCount = 0;
-          try {
-            retryCount++;
-            Thread.sleep(SETUP_RETRY_SECOND * 1000);
-            if (state.isSolrCollectionReady()) {
-              LOG.info("Tries to initialize logfeeder filters in '{}' collection", solrPropsConfig.getCollection());
-              userConfigSolrDao.getUserFilter();
-              break;
-            }
-          } catch (Exception e) {
-            LOG.error("Not able to save logfeeder filter while initialization, retryCount=" + retryCount, e);
-          }
-        }
-      }
-    };
-    setupFiltersThread.setDaemon(true);
-    setupFiltersThread.start();
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/UserConfigSolrDao.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/UserConfigSolrDao.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/UserConfigSolrDao.java
index 8fb27a7..a0e01a3 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/UserConfigSolrDao.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/dao/UserConfigSolrDao.java
@@ -20,45 +20,25 @@
 package org.apache.ambari.logsearch.dao;
 
 import java.io.IOException;
-import java.util.List;
-import java.util.Set;
-import java.util.TreeMap;
 
 import javax.annotation.PostConstruct;
 import javax.inject.Inject;
 import javax.inject.Named;
 
-import org.apache.ambari.logsearch.common.HadoopServiceConfigHelper;
-import org.apache.ambari.logsearch.common.LogSearchConstants;
 import org.apache.ambari.logsearch.common.LogSearchContext;
 import org.apache.ambari.logsearch.common.LogType;
 import org.apache.ambari.logsearch.conf.SolrPropsConfig;
 import org.apache.ambari.logsearch.conf.SolrUserPropsConfig;
 import org.apache.ambari.logsearch.conf.global.SolrCollectionState;
-import org.apache.ambari.logsearch.configurer.LogfeederFilterConfigurer;
 import org.apache.ambari.logsearch.configurer.SolrCollectionConfigurer;
-import org.apache.ambari.logsearch.model.common.LogFeederDataMap;
-import org.apache.ambari.logsearch.model.common.LogfeederFilterData;
-import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.client.solrj.response.UpdateResponse;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
 
-import org.apache.ambari.logsearch.util.JSONUtil;
-import org.apache.commons.collections.CollectionUtils;
 import org.apache.log4j.Logger;
 import org.springframework.data.solr.core.SolrTemplate;
 
-import static org.apache.ambari.logsearch.solr.SolrConstants.UserConfigConstants.ID;
-import static org.apache.ambari.logsearch.solr.SolrConstants.UserConfigConstants.USER_NAME;
-import static org.apache.ambari.logsearch.solr.SolrConstants.UserConfigConstants.VALUES;
-import static org.apache.ambari.logsearch.solr.SolrConstants.UserConfigConstants.FILTER_NAME;
-import static org.apache.ambari.logsearch.solr.SolrConstants.UserConfigConstants.ROW_TYPE;
-
 @Named
 public class UserConfigSolrDao extends SolrDaoBase {
 
@@ -99,25 +79,12 @@ public class UserConfigSolrDao extends SolrDaoBase {
 
     try {
       new SolrCollectionConfigurer(this, false).start();
-      new LogfeederFilterConfigurer(this).start();
     } catch (Exception e) {
       LOG.error("error while connecting to Solr for history logs : solrUrl=" + solrUrl + ", zkConnectString=" + zkConnectString +
           ", collection=" + collection, e);
     }
   }
 
-  public void saveUserFilter(LogFeederDataMap logfeederFilterWrapper) throws SolrException, SolrServerException, IOException {
-    String filterName = LogSearchConstants.LOGFEEDER_FILTER_NAME;
-    String json = JSONUtil.objToJson(logfeederFilterWrapper);
-    SolrInputDocument configDocument = new SolrInputDocument();
-    configDocument.addField(ID, logfeederFilterWrapper.getId());
-    configDocument.addField(ROW_TYPE, filterName);
-    configDocument.addField(VALUES, json);
-    configDocument.addField(USER_NAME, filterName);
-    configDocument.addField(FILTER_NAME, filterName);
-    addDocs(configDocument);
-  }
-
   public void deleteUserConfig(String id) throws SolrException, SolrServerException, IOException {
     removeDoc("id:" + id);
   }
@@ -138,52 +105,6 @@ public class UserConfigSolrDao extends SolrDaoBase {
     return updateResoponse;
   }
 
-  public LogFeederDataMap getUserFilter() throws SolrServerException, IOException {
-    SolrQuery solrQuery = new SolrQuery();
-    solrQuery.setQuery("*:*");
-    solrQuery.setFilterQueries(ROW_TYPE + ":" + LogSearchConstants.LOGFEEDER_FILTER_NAME);
-
-    QueryResponse response = process(solrQuery);
-    SolrDocumentList documentList = response.getResults();
-    LogFeederDataMap logfeederDataMap = null;
-    if (CollectionUtils.isNotEmpty(documentList)) {
-      SolrDocument configDoc = documentList.get(0);
-      String json = (String) configDoc.get(VALUES);
-      logfeederDataMap = (LogFeederDataMap) JSONUtil.jsonToObj(json, LogFeederDataMap.class);
-      logfeederDataMap.setId("" + configDoc.get(ID));
-    } else {
-      logfeederDataMap = new LogFeederDataMap();
-      logfeederDataMap.setFilter(new TreeMap<String, LogfeederFilterData>());
-      logfeederDataMap.setId(Long.toString(System.currentTimeMillis()));
-    }
-    
-    addMissingFilters(logfeederDataMap);
-    
-    return logfeederDataMap;
-  }
-
-  private void addMissingFilters(LogFeederDataMap logfeederDataMap) throws SolrServerException, IOException {
-    Set<String> logIds = HadoopServiceConfigHelper.getAllLogIds();
-    if (logIds != null) {
-      List<String> logfeederDefaultLevels = solrUserConfig.getLogLevels();
-      
-      boolean modified = false;
-      for (String logId : logIds) {
-        if (!logfeederDataMap.getFilter().containsKey(logId)) {
-          LogfeederFilterData logfeederFilterData = new LogfeederFilterData();
-          logfeederFilterData.setLabel(logId);
-          logfeederFilterData.setDefaultLevels(logfeederDefaultLevels);
-          logfeederDataMap.getFilter().put(logId, logfeederFilterData);
-          modified = true;
-        }
-      }
-      
-      if (modified) {
-        saveUserFilter(logfeederDataMap);
-      }
-    }
-  }
-
   @Override
   public SolrCollectionState getSolrCollectionState() {
     return solrUserConfigState;

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
index 00adb67..885771d 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/doc/DocConstants.java
@@ -113,15 +113,15 @@ public class DocConstants {
     public static final String SAVE_USER_CONFIG_OD = "Save user config";
     public static final String DELETE_USER_CONFIG_OD = "Delete user config";
     public static final String GET_USER_CONFIG_OD = "Get user config";
-    public static final String GET_USER_FILTER_OD = "Get user filter";
-    public static final String UPDATE_USER_FILTER_OD = "Update user filter";
     public static final String GET_ALL_USER_NAMES_OD = "Get all user names";
   }
 
   public class ShipperConfigOperationDescriptions {
-    public static final String GET_SERVICE_NAMES = "Get service names";
-    public static final String GET_SHIPPER_CONFIG = "Get shipper config";
-    public static final String SET_SHIPPER_CONFIG = "Set shipper config";
+    public static final String GET_SERVICE_NAMES_OD = "Get service names";
+    public static final String GET_SHIPPER_CONFIG_OD = "Get shipper config";
+    public static final String SET_SHIPPER_CONFIG_OD = "Set shipper config";
+    public static final String GET_LOG_LEVEL_FILTER_OD = "Get log level filter";
+    public static final String UPDATE_LOG_LEVEL_FILTER_OD = "Update log level filter";
   }
 
   public class StatusOperationDescriptions {

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ShipperConfigManager.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ShipperConfigManager.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ShipperConfigManager.java
index c0c1167..1118233 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ShipperConfigManager.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/ShipperConfigManager.java
@@ -22,11 +22,15 @@ package org.apache.ambari.logsearch.manager;
 import java.util.List;
 
 import org.apache.ambari.logsearch.configurer.LogSearchConfigConfigurer;
+import org.apache.ambari.logsearch.model.common.LSServerLogLevelFilterMap;
 import org.apache.log4j.Logger;
 
+import com.google.common.collect.ImmutableMap;
+
 import javax.annotation.PostConstruct;
 import javax.inject.Inject;
 import javax.inject.Named;
+import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 
 @Named
@@ -50,12 +54,51 @@ public class ShipperConfigManager extends JsonManagerBase {
     return LogSearchConfigConfigurer.getConfig().getInputConfig(clusterName, serviceName);
   }
 
+  public Response createInputConfig(String clusterName, String serviceName, String inputConfig) {
+    
+    try {
+      if (LogSearchConfigConfigurer.getConfig().inputConfigExists(clusterName, serviceName)) {
+        return Response.serverError()
+            .type(MediaType.APPLICATION_JSON)
+            .entity(ImmutableMap.of("errorMessage", "Input config already exists for service " + serviceName))
+            .build();
+      }
+      
+      LogSearchConfigConfigurer.getConfig().createInputConfig(clusterName, serviceName, inputConfig);
+      return Response.ok().build();
+    } catch (Exception e) {
+      logger.warn("Could not create input config", e);
+      return Response.serverError().build();
+    }
+  }
+
   public Response setInputConfig(String clusterName, String serviceName, String inputConfig) {
     try {
+      if (!LogSearchConfigConfigurer.getConfig().inputConfigExists(clusterName, serviceName)) {
+        return Response.serverError()
+            .type(MediaType.APPLICATION_JSON)
+            .entity(ImmutableMap.of("errorMessage", "Input config doesn't exist for service " + serviceName))
+            .build();
+      }
+      
       LogSearchConfigConfigurer.getConfig().setInputConfig(clusterName, serviceName, inputConfig);
       return Response.ok().build();
     } catch (Exception e) {
-      logger.warn("Could not write input config", e);
+      logger.warn("Could not update input config", e);
+      return Response.serverError().build();
+    }
+  }
+
+  public LSServerLogLevelFilterMap getLogLevelFilters(String clusterName) {
+    return new LSServerLogLevelFilterMap(LogSearchConfigConfigurer.getConfig().getLogLevelFilters(clusterName));
+  }
+
+  public Response setLogLevelFilters(String clusterName, LSServerLogLevelFilterMap request) {
+    try {
+      LogSearchConfigConfigurer.getConfig().setLogLevelFilters(clusterName, request.convertToApi());
+      return Response.ok().build();
+    } catch (Exception e) {
+      logger.warn("Could not update log level filters", e);
       return Response.serverError().build();
     }
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/UserConfigManager.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/UserConfigManager.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/UserConfigManager.java
index a60fc5c..1df9f5a 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/UserConfigManager.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/manager/UserConfigManager.java
@@ -27,7 +27,6 @@ import java.util.List;
 import org.apache.ambari.logsearch.common.LogSearchContext;
 import org.apache.ambari.logsearch.common.MessageEnums;
 import org.apache.ambari.logsearch.dao.UserConfigSolrDao;
-import org.apache.ambari.logsearch.model.common.LogFeederDataMap;
 import org.apache.ambari.logsearch.model.request.impl.UserConfigRequest;
 import org.apache.ambari.logsearch.model.response.UserConfigData;
 import org.apache.ambari.logsearch.model.response.UserConfigDataListResponse;
@@ -176,29 +175,6 @@ public class UserConfigManager extends JsonManagerBase {
 
   }
 
-  // ////////////////////////////LEVEL FILTER/////////////////////////////////////
-
-  public LogFeederDataMap getUserFilter() {
-    LogFeederDataMap userFilter;
-    try {
-      userFilter = userConfigSolrDao.getUserFilter();
-    } catch (SolrServerException | IOException e) {
-      logger.error(e);
-      throw RESTErrorUtil.createRESTException(MessageEnums.SOLR_ERROR.getMessage().getMessage(), MessageEnums.ERROR_SYSTEM);
-    }
-    return userFilter;
-  }
-
-  public LogFeederDataMap saveUserFiter(LogFeederDataMap logfeederFilters) {
-    try {
-      userConfigSolrDao.saveUserFilter(logfeederFilters);
-    } catch (SolrException | SolrServerException | IOException e) {
-      logger.error("user config not able to save", e);
-      throw RESTErrorUtil.createRESTException(MessageEnums.SOLR_ERROR.getMessage().getMessage(), MessageEnums.ERROR_SYSTEM);
-    }
-    return getUserFilter();
-  }
-
   public List<String> getAllUserName() {
     List<String> userList = new ArrayList<String>();
     try {

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerLogLevelFilter.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerLogLevelFilter.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerLogLevelFilter.java
new file mode 100644
index 0000000..2a00802
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerLogLevelFilter.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import java.util.Date;
+import java.util.List;
+
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel
+public class LSServerLogLevelFilter {
+
+  @ApiModelProperty private String label;
+  @ApiModelProperty private List<String> hosts;
+  @ApiModelProperty private List<String> defaultLevels;
+  @ApiModelProperty private List<String> overrideLevels;
+  @ApiModelProperty private Date expiryTime;
+
+  public LSServerLogLevelFilter() {}
+
+  public LSServerLogLevelFilter(LogLevelFilter logLevelFilter) {
+    label = logLevelFilter.getLabel();
+    hosts = logLevelFilter.getHosts();
+    defaultLevels = logLevelFilter.getDefaultLevels();
+    overrideLevels = logLevelFilter.getOverrideLevels();
+    expiryTime = logLevelFilter.getExpiryTime();
+  }
+
+  public String getLabel() {
+    return label;
+  }
+
+  public void setLabel(String label) {
+    this.label = label;
+  }
+
+  public List<String> getHosts() {
+    return hosts;
+  }
+
+  public void setHosts(List<String> hosts) {
+    this.hosts = hosts;
+  }
+
+  public List<String> getDefaultLevels() {
+    return defaultLevels;
+  }
+
+  public void setDefaultLevels(List<String> defaultLevels) {
+    this.defaultLevels = defaultLevels;
+  }
+
+  public List<String> getOverrideLevels() {
+    return overrideLevels;
+  }
+
+  public void setOverrideLevels(List<String> overrideLevels) {
+    this.overrideLevels = overrideLevels;
+  }
+
+  public Date getExpiryTime() {
+    return expiryTime;
+  }
+
+  public void setExpiryTime(Date expiryTime) {
+    this.expiryTime = expiryTime;
+  }
+
+  public LogLevelFilter convertToApi() {
+    LogLevelFilter apiFilter = new LogLevelFilter();
+    
+    apiFilter.setLabel(label);
+    apiFilter.setHosts(hosts);
+    apiFilter.setDefaultLevels(defaultLevels);
+    apiFilter.setOverrideLevels(overrideLevels);
+    apiFilter.setExpiryTime(expiryTime);
+    
+    return apiFilter;
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerLogLevelFilterMap.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerLogLevelFilterMap.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerLogLevelFilterMap.java
new file mode 100644
index 0000000..3088db1
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LSServerLogLevelFilterMap.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.model.common;
+
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilterMap;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+@ApiModel
+public class LSServerLogLevelFilterMap {
+
+  @ApiModelProperty
+  private TreeMap<String, LSServerLogLevelFilter> filter;
+
+  public LSServerLogLevelFilterMap() {}
+
+  public LSServerLogLevelFilterMap(LogLevelFilterMap logLevelFilterMap) {
+    filter = new TreeMap<>();
+    for (Map.Entry<String, LogLevelFilter> e : logLevelFilterMap.getFilter().entrySet()) {
+      filter.put(e.getKey(), new LSServerLogLevelFilter(e.getValue()));
+    }
+  }
+
+  public TreeMap<String, LSServerLogLevelFilter> getFilter() {
+    return filter;
+  }
+
+  public void setFilter(TreeMap<String, LSServerLogLevelFilter> filter) {
+    this.filter = filter;
+  }
+
+  public LogLevelFilterMap convertToApi() {
+    LogLevelFilterMap logLevelFilterMap = new LogLevelFilterMap();
+
+    TreeMap<String, LogLevelFilter> apiFilter = new TreeMap<>();
+    for (Map.Entry<String, LSServerLogLevelFilter> e : filter.entrySet()) {
+      apiFilter.put(e.getKey(), e.getValue().convertToApi());
+    }
+    logLevelFilterMap.setFilter(apiFilter);
+
+    return logLevelFilterMap;
+  }
+}
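
Taken together, the two models above (de)serialize a payload of this shape; the field names come from the classes, while the concrete values here are hypothetical:

    # Illustrative JSON body for LSServerLogLevelFilterMap (values are made up).
    log_level_filter_map = {
        "filter": {
            "knox_gateway": {
                "label": "knox_gateway",
                "hosts": ["c6401.ambari.apache.org"],   # hypothetical host
                "defaultLevels": ["FATAL", "ERROR", "WARN", "INFO"],
                "overrideLevels": ["DEBUG", "TRACE"],
                "expiryTime": "2017-05-18T13:51:05.000Z"  # assumed date format
            }
        }
    }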

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LogFeederDataMap.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LogFeederDataMap.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LogFeederDataMap.java
deleted file mode 100644
index cc7d53d..0000000
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LogFeederDataMap.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.ambari.logsearch.model.common;
-
-import io.swagger.annotations.ApiModel;
-import io.swagger.annotations.ApiModelProperty;
-
-import java.util.TreeMap;
-
-@ApiModel
-public class LogFeederDataMap {
-
-  @ApiModelProperty
-  private String id;
-
-  @ApiModelProperty
-  private TreeMap<String, LogfeederFilterData> filter;
-
-  public TreeMap<String, LogfeederFilterData> getFilter() {
-    return filter;
-  }
-
-  public void setFilter(TreeMap<String, LogfeederFilterData> filter) {
-    this.filter = filter;
-  }
-
-  public String getId() {
-    return id;
-  }
-
-  public void setId(String id) {
-    this.id = id;
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LogfeederFilterData.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LogfeederFilterData.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LogfeederFilterData.java
deleted file mode 100644
index e0f8013..0000000
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/model/common/LogfeederFilterData.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.ambari.logsearch.model.common;
-
-import io.swagger.annotations.ApiModel;
-import io.swagger.annotations.ApiModelProperty;
-
-import java.util.ArrayList;
-import java.util.List;
-
-@ApiModel
-public class LogfeederFilterData {
-
-  @ApiModelProperty
-  private String label;
-
-  @ApiModelProperty
-  private List<String> hosts = new ArrayList<>();
-
-  @ApiModelProperty
-  private List<String> defaultLevels = new ArrayList<>();
-
-  @ApiModelProperty
-  private List<String> overrideLevels = new ArrayList<>();
-
-  @ApiModelProperty
-  private String expiryTime;
-
-  public LogfeederFilterData() {
-  }
-
-  public String getLabel() {
-    return label;
-  }
-
-  public void setLabel(String label) {
-    this.label = label;
-  }
-
-  public List<String> getHosts() {
-    return hosts;
-  }
-
-  public void setHosts(List<String> hosts) {
-    this.hosts = hosts;
-  }
-
-  public List<String> getDefaultLevels() {
-    return defaultLevels;
-  }
-
-  public void setDefaultLevels(List<String> defaultLevels) {
-    this.defaultLevels = defaultLevels;
-  }
-
-  public List<String> getOverrideLevels() {
-    return overrideLevels;
-  }
-
-  public void setOverrideLevels(List<String> overrideLevels) {
-    this.overrideLevels = overrideLevels;
-  }
-
-  public String getExpiryTime() {
-    return expiryTime;
-  }
-
-  public void setExpiryTime(String expiryTime) {
-    this.expiryTime = expiryTime;
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ShipperConfigResource.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ShipperConfigResource.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ShipperConfigResource.java
index 47e6ba2..342d1cf 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ShipperConfigResource.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/ShipperConfigResource.java
@@ -22,6 +22,7 @@ package org.apache.ambari.logsearch.rest;
 import javax.inject.Inject;
 import javax.inject.Named;
 import javax.ws.rs.GET;
+import javax.ws.rs.POST;
 import javax.ws.rs.PUT;
 import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
@@ -30,12 +31,18 @@ import javax.ws.rs.core.Response;
 
 import io.swagger.annotations.Api;
 import io.swagger.annotations.ApiOperation;
+
 import org.apache.ambari.logsearch.manager.ShipperConfigManager;
+import org.apache.ambari.logsearch.model.common.LSServerLogLevelFilterMap;
 import org.springframework.context.annotation.Scope;
 
 import java.util.List;
 
-import static org.apache.ambari.logsearch.doc.DocConstants.ShipperConfigOperationDescriptions.*;
+import static org.apache.ambari.logsearch.doc.DocConstants.ShipperConfigOperationDescriptions.GET_LOG_LEVEL_FILTER_OD;
+import static org.apache.ambari.logsearch.doc.DocConstants.ShipperConfigOperationDescriptions.GET_SERVICE_NAMES_OD;
+import static org.apache.ambari.logsearch.doc.DocConstants.ShipperConfigOperationDescriptions.GET_SHIPPER_CONFIG_OD;
+import static org.apache.ambari.logsearch.doc.DocConstants.ShipperConfigOperationDescriptions.SET_SHIPPER_CONFIG_OD;
+import static org.apache.ambari.logsearch.doc.DocConstants.ShipperConfigOperationDescriptions.UPDATE_LOG_LEVEL_FILTER_OD;
 
 @Api(value = "shipper", description = "Shipper config operations")
 @Path("shipper")
@@ -49,7 +56,7 @@ public class ShipperConfigResource {
   @GET
   @Path("/input/{clusterName}/services")
   @Produces({"application/json"})
-  @ApiOperation(GET_SERVICE_NAMES)
+  @ApiOperation(GET_SERVICE_NAMES_OD)
   public List<String> getServices(@PathParam("clusterName") String clusterName) {
     return shipperConfigManager.getServices(clusterName);
   }
@@ -57,17 +64,43 @@ public class ShipperConfigResource {
   @GET
   @Path("/input/{clusterName}/services/{serviceName}")
   @Produces({"application/json"})
-  @ApiOperation(GET_SHIPPER_CONFIG)
+  @ApiOperation(GET_SHIPPER_CONFIG_OD)
   public String getShipperConfig(@PathParam("clusterName") String clusterName, @PathParam("serviceName") String serviceName) {
     return shipperConfigManager.getInputConfig(clusterName, serviceName);
   }
 
+  @POST
+  @Path("/input/{clusterName}/services/{serviceName}")
+  @Produces({"application/json"})
+  @ApiOperation(SET_SHIPPER_CONFIG_OD)
+  public Response createShipperConfig(String body, @PathParam("clusterName") String clusterName, @PathParam("serviceName")
+    String serviceName) {
+    return shipperConfigManager.createInputConfig(clusterName, serviceName, body);
+  }
+
   @PUT
   @Path("/input/{clusterName}/services/{serviceName}")
-  @Produces("text/plain")
-  @ApiOperation(SET_SHIPPER_CONFIG)
+  @Produces({"application/json"})
+  @ApiOperation(SET_SHIPPER_CONFIG_OD)
   public Response setShipperConfig(String body, @PathParam("clusterName") String clusterName, @PathParam("serviceName")
     String serviceName) {
     return shipperConfigManager.setInputConfig(clusterName, serviceName, body);
   }
+
+  @GET
+  @Path("/filters/{clusterName}/level")
+  @Produces({"application/json"})
+  @ApiOperation(GET_LOG_LEVEL_FILTER_OD)
+  public LSServerLogLevelFilterMap getLogLevelFilters(@PathParam("clusterName") String clusterName) {
+    return shipperConfigManager.getLogLevelFilters(clusterName);
+  }
+
+  @PUT
+  @Path("/filters/{clusterName}/level")
+  @Produces({"application/json"})
+  @ApiOperation(UPDATE_LOG_LEVEL_FILTER_OD)
+  public Response setLogLevelFilter(LSServerLogLevelFilterMap request, @PathParam("clusterName") String clusterName) {
+    return shipperConfigManager.setLogLevelFilters(clusterName, request);
+  }
+
 }
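
The two new /filters/{clusterName}/level endpoints can be exercised with any JAX-RS 2.0 client. A hedged sketch follows; the host, the 61888 port, the /api/v1 path prefix, the "cl1" cluster name and the JSON body are assumptions for a local Log Search instance, and a JAX-RS client implementation (Jersey, for example) must be on the classpath.

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;

public class LogLevelFilterEndpointSketch {
  public static void main(String[] args) {
    Client client = ClientBuilder.newClient();
    String base = "http://localhost:61888/api/v1/shipper"; // assumed base URL

    // GET /shipper/filters/{clusterName}/level returns the LSServerLogLevelFilterMap as JSON.
    String current = client.target(base)
        .path("filters").path("cl1").path("level")
        .request(MediaType.APPLICATION_JSON)
        .get(String.class);
    System.out.println(current);

    // PUT an edited map back; the body shape mirrors LSServerLogLevelFilterMap.
    String body = "{\"filter\":{\"ambari_audit\":{\"label\":\"ambari_audit\","
        + "\"hosts\":[],\"defaultLevels\":[\"FATAL\",\"ERROR\",\"WARN\"],\"overrideLevels\":[]}}}";
    Response response = client.target(base)
        .path("filters").path("cl1").path("level")
        .request(MediaType.APPLICATION_JSON)
        .put(Entity.entity(body, MediaType.APPLICATION_JSON));
    System.out.println(response.getStatus());

    client.close();
  }
}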

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/UserConfigResource.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/UserConfigResource.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/UserConfigResource.java
index 41dda05..00b971a 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/UserConfigResource.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/rest/UserConfigResource.java
@@ -25,7 +25,6 @@ import javax.ws.rs.BeanParam;
 import javax.ws.rs.DELETE;
 import javax.ws.rs.GET;
 import javax.ws.rs.POST;
-import javax.ws.rs.PUT;
 import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
 import javax.ws.rs.Produces;
@@ -33,7 +32,6 @@ import javax.ws.rs.Produces;
 import io.swagger.annotations.Api;
 import io.swagger.annotations.ApiOperation;
 import org.apache.ambari.logsearch.manager.UserConfigManager;
-import org.apache.ambari.logsearch.model.common.LogFeederDataMap;
 import org.apache.ambari.logsearch.model.request.impl.UserConfigRequest;
 import org.apache.ambari.logsearch.model.response.UserConfigData;
 import org.apache.ambari.logsearch.model.response.UserConfigDataListResponse;
@@ -74,22 +72,6 @@ public class UserConfigResource {
   }
 
   @GET
-  @Path("/filters")
-  @Produces({"application/json"})
-  @ApiOperation(GET_USER_FILTER_OD)
-  public LogFeederDataMap getUserFilter() {
-    return userConfigManager.getUserFilter();
-  }
-
-  @PUT
-  @Path("/filters")
-  @Produces({"application/json"})
-  @ApiOperation(UPDATE_USER_FILTER_OD)
-  public LogFeederDataMap updateUserFilter(LogFeederDataMap request) {
-    return userConfigManager.saveUserFiter(request);
-  }
-
-  @GET
   @Path("/names")
   @Produces({"application/json"})
   @ApiOperation(GET_ALL_USER_NAMES_OD)

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-web/src/main/webapp/templates/common/Header_tmpl.html
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/main/webapp/templates/common/Header_tmpl.html b/ambari-logsearch/ambari-logsearch-web/src/main/webapp/templates/common/Header_tmpl.html
index 5f1bbdb..24cc392 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/main/webapp/templates/common/Header_tmpl.html
+++ b/ambari-logsearch/ambari-logsearch-web/src/main/webapp/templates/common/Header_tmpl.html
@@ -34,11 +34,12 @@
                         <i class="fa fa-filter  pull-right"></i>
                     </a>
                 </li> -->
-                 <li class="dropdown" data-id="createFilters" title="Logfeeder Filters">
+<!-- TODO: update filters to support multiple clusters
+                <li class="dropdown" data-id="createFilters" title="Logfeeder Filters">
                     <a href="#" class="account excludeStatus" data-toggle="modal">
                         <i class="fa fa-filter"></i>
                     </a>
-                </li>
+                </li>-->
                 <li class="dropdown" title="Menu">
                     <a href="#" class="dropdown-toggle account" data-toggle="dropdown">
                         <!-- <div class="avatar">

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
index dc6037e..44fbd4d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
@@ -23,6 +23,7 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -52,6 +53,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.jdbc.support.JdbcUtils;
 
+import com.google.common.collect.Sets;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
 
@@ -333,6 +335,19 @@ public class UpgradeCatalog300 extends AbstractUpgradeCatalog {
 
             removeConfigurationPropertiesFromCluster(cluster, configType, removeProperties);
           }
+          
+          Config logSearchProperties = cluster.getDesiredConfigByType("logsearch-properties");
+          Config logFeederProperties = cluster.getDesiredConfigByType("logfeeder-properties");
+          if (logSearchProperties != null && logFeederProperties != null) {
+            String defaultLogLevels = logSearchProperties.getProperties().get("logsearch.logfeeder.include.default.level");
+            
+            Set<String> removeProperties = Sets.newHashSet("logsearch.logfeeder.include.default.level");
+            removeConfigurationPropertiesFromCluster(cluster, "logsearch-properties", removeProperties);
+            
+            Map<String, String> newProperties = new HashMap<>();
+            newProperties.put("logfeeder.include.default.level", defaultLogLevels);
+            updateConfigurationPropertiesForCluster(cluster, "logfeeder-properties", newProperties, true, true);
+          }
         }
       }
     }
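
Stripped of the Ambari cluster and config plumbing, the migration above is a key rename across config types: read the old logsearch-properties value, remove it, and re-create it under logfeeder-properties. A self-contained sketch of the equivalent map operation (plain HashMaps stand in for the Config objects):

import java.util.HashMap;
import java.util.Map;

public class LogFeederPropertyMoveSketch {
  public static void main(String[] args) {
    Map<String, String> logsearchProperties = new HashMap<>();
    logsearchProperties.put("logsearch.logfeeder.include.default.level", "FATAL,ERROR,WARN");
    Map<String, String> logfeederProperties = new HashMap<>();

    // Remove the old key and carry its value over to the renamed key, mirroring
    // removeConfigurationPropertiesFromCluster / updateConfigurationPropertiesForCluster above.
    String defaultLevels = logsearchProperties.remove("logsearch.logfeeder.include.default.level");
    if (defaultLevels != null) {
      logfeederProperties.put("logfeeder.include.default.level", defaultLevels);
    }
    System.out.println(logfeederProperties); // {logfeeder.include.default.level=FATAL,ERROR,WARN}
  }
}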

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-properties.xml b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-properties.xml
index 1ff8ad3..a38f961 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-properties.xml
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logfeeder-properties.xml
@@ -120,4 +120,14 @@
     <display-name>Input cache key field</display-name>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>logfeeder.include.default.level</name>
+    <value>FATAL,ERROR,WARN</value>
+    <description>Include default Log Feeder Log Levels for Log Search. Used for bootstrapping the configuration only. (levels: FATAL,ERROR,WARN,INFO,DEBUG,TRACE)</description>
+    <display-name>Log Feeder Log Levels</display-name>
+    <value-attributes>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-properties.xml b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-properties.xml
index a18c5c5..be586fd 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-properties.xml
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/configuration/logsearch-properties.xml
@@ -130,16 +130,6 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
-    <name>logsearch.logfeeder.include.default.level</name>
-    <value>FATAL,ERROR,WARN</value>
-    <description>Include default Log Feeder Log Levels for Log Search. Used for bootstrapping the configuration only. (levels: FATAL,ERROR,WARN,INFO,DEBUG,TRACE)</description>
-    <display-name>Log Feeder Log Levels</display-name>
-    <value-attributes>
-      <editable-only-at-install>true</editable-only-at-install>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
     <name>logsearch.solr.metrics.collector.hosts</name>
     <value>{metrics_collector_hosts}</value>
     <description>Metrics collector hosts for pushing metrics by Log Search Solr</description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/themes/theme.json b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/themes/theme.json
index 0adcbde..d36d89c 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/themes/theme.json
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/themes/theme.json
@@ -87,7 +87,7 @@
       "configuration-layout": "default",
       "configs": [
         {
-          "config" : "logsearch-properties/logsearch.logfeeder.include.default.level",
+          "config" : "logfeeder-properties/logfeeder.include.default.level",
           "subsection-name": "subsection-logsearch-server-col1"
         },
         {
@@ -353,7 +353,7 @@
         }
       },
       {
-        "config": "logsearch-properties/logsearch.logfeeder.include.default.level",
+        "config": "logfeeder-properties/logfeeder.include.default.level",
         "widget": {
           "type": "text-field"
         }

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
index 8f1510b..c949ca2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
@@ -302,9 +302,32 @@ public class UpgradeCatalog300Test {
     expect(controller.createConfig(anyObject(Cluster.class), anyString(), capture(logSearchConfCapture), anyString(),
         EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).times(2);
 
+    Map<String, String> oldLogSearchProperties = ImmutableMap.of(
+        "logsearch.logfeeder.include.default.level", "FATAL,ERROR,WARN"
+    );
+
+    Map<String, String> expectedLogFeederProperties = ImmutableMap.of(
+        "logfeeder.include.default.level", "FATAL,ERROR,WARN"
+    );
+
+    Config logFeederPropertiesConf = easyMockSupport.createNiceMock(Config.class);
+    expect(cluster.getDesiredConfigByType("logfeeder-properties")).andReturn(logFeederPropertiesConf).times(2);
+    expect(logFeederPropertiesConf.getProperties()).andReturn(Collections.<String, String> emptyMap()).once();
+    Capture<Map<String, String>> logFeederPropertiesCapture = EasyMock.newCapture();
+    expect(controller.createConfig(anyObject(Cluster.class), eq("logfeeder-properties"), capture(logFeederPropertiesCapture),
+        anyString(), EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
+
+    Config logSearchPropertiesConf = easyMockSupport.createNiceMock(Config.class);
+    expect(cluster.getDesiredConfigByType("logsearch-properties")).andReturn(logSearchPropertiesConf).times(2);
+    expect(logSearchPropertiesConf.getProperties()).andReturn(oldLogSearchProperties).times(2);
+    Capture<Map<String, String>> logSearchPropertiesCapture = EasyMock.newCapture();
+    expect(controller.createConfig(anyObject(Cluster.class), eq("logsearch-properties"), capture(logSearchPropertiesCapture),
+        anyString(), EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
+
     replay(clusters, cluster);
     replay(controller, injector2);
     replay(confSomethingElse1, confSomethingElse2, confLogSearchConf1, confLogSearchConf2);
+    replay(logSearchPropertiesConf, logFeederPropertiesConf);
     new UpgradeCatalog300(injector2).updateLogSearchConfigs();
     easyMockSupport.verifyAll();
 
@@ -313,5 +336,11 @@ public class UpgradeCatalog300Test {
     for (Map<String, String> updatedLogSearchConf : updatedLogSearchConfs) {
       assertTrue(Maps.difference(Collections.<String, String> emptyMap(), updatedLogSearchConf).areEqual());
     }
+    
+    Map<String,String> newLogFeederProperties = logFeederPropertiesCapture.getValue();
+    assertTrue(Maps.difference(expectedLogFeederProperties, newLogFeederProperties).areEqual());
+    
+    Map<String,String> newLogSearchProperties = logSearchPropertiesCapture.getValue();
+    assertTrue(Maps.difference(Collections.<String, String> emptyMap(), newLogSearchProperties).areEqual());
   }
 }


[12/25] ambari git commit: AMBARI-21036. HDP 3.0 TP - create Service Advisor for Knox. (vbrodetsky)

Posted by jl...@apache.org.
AMBARI-21036. HDP 3.0 TP - create Service Advisor for Knox. (vbrodetsky)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f18a822c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f18a822c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f18a822c

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: f18a822c8bae1d77db7a3b033a84d979b5bf0392
Parents: 350dace
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Wed May 17 13:54:48 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Wed May 17 13:54:48 2017 +0300

----------------------------------------------------------------------
 .../KNOX/0.5.0.3.0/service_advisor.py           | 253 +++++++++++++++++++
 1 file changed, 253 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f18a822c/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/service_advisor.py
new file mode 100644
index 0000000..575f910
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/service_advisor.py
@@ -0,0 +1,253 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+# Python imports
+import imp
+import os
+import traceback
+import re
+import socket
+import fnmatch
+import xml.etree.ElementTree as ET
+
+
+from resource_management.core.logger import Logger
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../stacks/')
+PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
+
+try:
+  with open(PARENT_FILE, 'rb') as fp:
+    service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
+except Exception as e:
+  traceback.print_exc()
+  print "Failed to load parent"
+
+class KnoxServiceAdvisor(service_advisor.ServiceAdvisor):
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(KnoxServiceAdvisor, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    # Always call these methods
+    self.modifyMastersWithMultipleInstances()
+    self.modifyCardinalitiesDict()
+    self.modifyHeapSizeProperties()
+    self.modifyNotValuableComponents()
+    self.modifyComponentsNotPreferableOnServer()
+    self.modifyComponentLayoutSchemes()
+
+  def modifyMastersWithMultipleInstances(self):
+    """
+    Modify the set of masters with multiple instances.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyCardinalitiesDict(self):
+    """
+    Modify the dictionary of cardinalities.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyHeapSizeProperties(self):
+    """
+    Modify the dictionary of heap size properties.
+    Must be overridden in child class.
+    """
+    pass
+
+  def modifyNotValuableComponents(self):
+    """
+    Modify the set of components whose host assignment is based on other services.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentsNotPreferableOnServer(self):
+    """
+    Modify the set of components that are not preferable on the server.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentLayoutSchemes(self):
+    """
+    Modify layout scheme dictionaries for components.
+    The scheme dictionary basically maps the number of hosts to
+    host index where component should exist.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def getServiceComponentLayoutValidations(self, services, hosts):
+    """
+    Get a list of errors.
+    Must be overridden in child class.
+    """
+
+    return []
+
+  def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
+    """
+    Entry point.
+    Must be overridden in child class.
+    """
+    #Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    recommender = KnoxRecommender()
+    recommender.recommendKnoxConfigurationsFromHDP22(configurations, clusterData, services, hosts)
+
+
+
+  def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
+    """
+    Entry point.
+    Validate configurations for the service. Return a list of errors.
+    The code for this function should be the same for each Service Advisor.
+    """
+    #Logger.info("Class: %s, Method: %s. Validating Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    validator = KnoxValidator()
+    # Calls the methods of the validator using arguments,
+    # method(siteProperties, siteRecommendations, configurations, services, hosts)
+    return validator.validateListOfConfigUsingMethod(configurations, recommendedDefaults, services, hosts, validator.validators)
+
+
+
+class KnoxRecommender(service_advisor.ServiceAdvisor):
+  """
+  Knox Recommender suggests properties when adding the service for the first time or modifying configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(KnoxRecommender, self)
+    self.as_super.__init__(*args, **kwargs)
+
+
+  def recommendKnoxConfigurationsFromHDP22(self, configurations, clusterData, services, hosts):
+    if "ranger-env" in services["configurations"] and "ranger-knox-plugin-properties" in services["configurations"] and \
+        "ranger-knox-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
+      putKnoxRangerPluginProperty = self.putProperty(configurations, "ranger-knox-plugin-properties", services)
+      rangerEnvKnoxPluginProperty = services["configurations"]["ranger-env"]["properties"]["ranger-knox-plugin-enabled"]
+      putKnoxRangerPluginProperty("ranger-knox-plugin-enabled", rangerEnvKnoxPluginProperty)
+
+    if 'topology' in services["configurations"] and 'content' in services["configurations"]["topology"]["properties"]:
+      putKnoxTopologyContent = self.putProperty(configurations, "topology", services)
+      rangerPluginEnabled = ''
+      if 'ranger-knox-plugin-properties' in configurations and 'ranger-knox-plugin-enabled' in  configurations['ranger-knox-plugin-properties']['properties']:
+        rangerPluginEnabled = configurations['ranger-knox-plugin-properties']['properties']['ranger-knox-plugin-enabled']
+      elif 'ranger-knox-plugin-properties' in services['configurations'] and 'ranger-knox-plugin-enabled' in services['configurations']['ranger-knox-plugin-properties']['properties']:
+        rangerPluginEnabled = services['configurations']['ranger-knox-plugin-properties']['properties']['ranger-knox-plugin-enabled']
+
+      # check if authorization provider already added
+      topologyContent = services["configurations"]["topology"]["properties"]["content"]
+      authorizationProviderExists = False
+      authNameChanged = False
+      root = ET.fromstring(topologyContent)
+      if root is not None:
+        gateway = root.find("gateway")
+        if gateway is not None:
+          for provider in gateway.findall('provider'):
+            role = provider.find('role')
+            if role is not None and role.text and role.text.lower() == "authorization":
+              authorizationProviderExists = True
+
+            name = provider.find('name')
+            if name is not None and name.text == "AclsAuthz" and rangerPluginEnabled \
+               and rangerPluginEnabled.lower() == "Yes".lower():
+              newAuthName = "XASecurePDPKnox"
+              authNameChanged = True
+            elif name is not None and (((not rangerPluginEnabled) or rangerPluginEnabled.lower() != "Yes".lower()) \
+               and name.text == 'XASecurePDPKnox'):
+              newAuthName = "AclsAuthz"
+              authNameChanged = True
+
+            if authNameChanged:
+              name.text = newAuthName
+              putKnoxTopologyContent('content', ET.tostring(root))
+
+            if authorizationProviderExists:
+              break
+
+      if not authorizationProviderExists:
+        if root is not None:
+          gateway = root.find("gateway")
+          if gateway is not None:
+            provider = ET.SubElement(gateway, 'provider')
+
+            role = ET.SubElement(provider, 'role')
+            role.text = "authorization"
+
+            name = ET.SubElement(provider, 'name')
+            if rangerPluginEnabled and rangerPluginEnabled.lower() == "Yes".lower():
+              name.text = "XASecurePDPKnox"
+            else:
+              name.text = "AclsAuthz"
+
+            enabled = ET.SubElement(provider, 'enabled')
+            enabled.text = "true"
+
+            #TODO add pretty format for newly added provider
+            putKnoxTopologyContent('content', ET.tostring(root))
+
+
+
+
+class KnoxValidator(service_advisor.ServiceAdvisor):
+  """
+  Knox Validator checks the correctness of properties whenever the service is first added or the user attempts to
+  change configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(KnoxValidator, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    self.validators = [("ranger-knox-plugin-properties", self.validateKnoxRangerPluginConfigurationsFromHDP22),
+                       ]
+
+  def validateKnoxRangerPluginConfigurationsFromHDP22(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    ranger_plugin_properties = self.getSiteProperties(configurations, "ranger-knox-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-knox-plugin-enabled'] if ranger_plugin_properties else 'No'
+    if 'RANGER' in servicesList and ranger_plugin_enabled.lower() == 'yes':
+      # ranger-hdfs-plugin must be enabled in ranger-env
+      ranger_env = self.getServicesSiteProperties(services, 'ranger-env')
+      if not ranger_env or not 'ranger-knox-plugin-enabled' in ranger_env or \
+          ranger_env['ranger-knox-plugin-enabled'].lower() != 'yes':
+        validationItems.append({"config-name": 'ranger-knox-plugin-enabled',
+                                "item": self.getWarnItem(
+                                  "ranger-knox-plugin-properties/ranger-knox-plugin-enabled must correspond to ranger-env/ranger-knox-plugin-enabled")})
+    return self.toConfigurationValidationProblems(validationItems, "ranger-knox-plugin-properties")
+
+
+
+
+


[24/25] ambari git commit: AMBARI-21043. Backport Ambari-17694 - Kafka listeners property does not show SASL_PLAINTEXT protocol when Kerberos is enabled (rlevas)

Posted by jl...@apache.org.
AMBARI-21043. Backport Ambari-17694 - Kafka listeners property does not show SASL_PLAINTEXT protocol when Kerberos is enabled (rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/bba703bc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/bba703bc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/bba703bc

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: bba703bc600555e596c49aef8749df65c9ac918c
Parents: 8bf136a
Author: Robert Levas <rl...@hortonworks.com>
Authored: Wed May 17 14:33:03 2017 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Wed May 17 14:33:03 2017 -0400

----------------------------------------------------------------------
 .../server/upgrade/UpgradeCatalog251.java       | 47 +++++++++-
 .../stacks/HDP/2.3/services/stack_advisor.py    |  2 +-
 .../server/upgrade/UpgradeCatalog251Test.java   | 92 ++++++++++++++++++++
 3 files changed, 139 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/bba703bc/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
index 6f8f2a6..745890c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog251.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -18,9 +18,18 @@
 package org.apache.ambari.server.upgrade;
 
 import java.sql.SQLException;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.SecurityType;
+import org.apache.commons.lang.StringUtils;
 
 import com.google.inject.Inject;
 import com.google.inject.Injector;
@@ -33,6 +42,8 @@ public class UpgradeCatalog251 extends AbstractUpgradeCatalog {
   static final String HOST_ROLE_COMMAND_TABLE = "host_role_command";
   static final String HRC_IS_BACKGROUND_COLUMN = "is_background";
 
+  protected static final String KAFKA_BROKER_CONFIG = "kafka-broker";
+
   /**
    * Constructor.
    *
@@ -79,6 +90,40 @@ public class UpgradeCatalog251 extends AbstractUpgradeCatalog {
    */
   @Override
   protected void executeDMLUpdates() throws AmbariException, SQLException {
+    updateKAFKAConfigs();
+  }
+
+  /**
+   * Ensure that the updates from Ambari 2.4.0 are applied in the event the initial version is
+   * Ambari 2.5.0, since this Kafka change failed to make it into Ambari 2.5.0.
+   *
+   * If the base version was before Ambari 2.5.0, this method should wind up doing nothing.
+   * @throws AmbariException
+   */
+  protected void updateKAFKAConfigs() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    Clusters clusters = ambariManagementController.getClusters();
+    if (clusters != null) {
+      Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
+      if (clusterMap != null && !clusterMap.isEmpty()) {
+        for (final Cluster cluster : clusterMap.values()) {
+          Set<String> installedServices = cluster.getServices().keySet();
+
+          if (installedServices.contains("KAFKA") && cluster.getSecurityType() == SecurityType.KERBEROS) {
+            Config kafkaBroker = cluster.getDesiredConfigByType(KAFKA_BROKER_CONFIG);
+            if (kafkaBroker != null) {
+              String listenersPropertyValue = kafkaBroker.getProperties().get("listeners");
+              if (StringUtils.isNotEmpty(listenersPropertyValue)) {
+                String newListenersPropertyValue = listenersPropertyValue.replaceAll("\\bPLAINTEXT\\b", "PLAINTEXTSASL");
+                if(!newListenersPropertyValue.equals(listenersPropertyValue)) {
+                  updateConfigurationProperties(KAFKA_BROKER_CONFIG, Collections.singletonMap("listeners", newListenersPropertyValue), true, false);
+                }
+              }
+            }
+          }
+        }
+      }
+    }
   }
 
   /**
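
The word-boundary regex is what makes updateKAFKAConfigs() both targeted and re-entrant. A small sketch demonstrating the behavior; the listener values are illustrative:

public class KafkaListenerRegexSketch {
  public static void main(String[] args) {
    String listeners = "PLAINTEXT://localhost:6667,SASL_PLAINTEXT://localhost:6668";

    // \bPLAINTEXT\b matches only the standalone protocol token. The underscore in
    // SASL_PLAINTEXT is a word character, so no boundary precedes that occurrence
    // and it is left untouched.
    String updated = listeners.replaceAll("\\bPLAINTEXT\\b", "PLAINTEXTSASL");
    System.out.println(updated);
    // -> PLAINTEXTSASL://localhost:6667,SASL_PLAINTEXT://localhost:6668

    // Re-running the replacement is a no-op, because PLAINTEXTSASL has no word
    // boundary after the PLAINTEXT prefix; this is why the upgrade can run twice safely.
    String again = updated.replaceAll("\\bPLAINTEXT\\b", "PLAINTEXTSASL");
    System.out.println(again.equals(updated)); // true
  }
}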

http://git-wip-us.apache.org/repos/asf/ambari/blob/bba703bc/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
index 8cefdac..9efcee0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
@@ -918,7 +918,7 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
       "HIVE": {"hiveserver2-site": self.validateHiveServer2Configurations,
                "hive-site": self.validateHiveConfigurations},
       "HBASE": {"hbase-site": self.validateHBASEConfigurations},
-      "KAKFA": {"kafka-broker": self.validateKAFKAConfigurations},
+      "KAFKA": {"kafka-broker": self.validateKAFKAConfigurations},
       "RANGER": {"admin-properties": self.validateRangerAdminConfigurations,
                  "ranger-env": self.validateRangerConfigurationsEnv}
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/bba703bc/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java
index 4575998..d725ec4 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog251Test.java
@@ -20,21 +20,29 @@ package org.apache.ambari.server.upgrade;
 
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.createMockBuilder;
+import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.newCapture;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.reset;
 import static org.easymock.EasyMock.verify;
 
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
 import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.Statement;
+import java.util.Collections;
+import java.util.Map;
 
 import javax.persistence.EntityManager;
 
 import org.apache.ambari.server.actionmanager.ActionManager;
 import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.KerberosHelper;
 import org.apache.ambari.server.controller.MaintenanceStateHelper;
 import org.apache.ambari.server.orm.DBAccessor;
@@ -42,10 +50,12 @@ import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.stack.OsFamily;
 import org.easymock.Capture;
 import org.easymock.EasyMockRunner;
+import org.easymock.EasyMockSupport;
 import org.easymock.Mock;
 import org.easymock.MockType;
 import org.junit.After;
@@ -53,8 +63,10 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
+import org.springframework.security.crypto.password.PasswordEncoder;
 
 import com.google.gson.Gson;
+import com.google.inject.AbstractModule;
 import com.google.inject.Binder;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
@@ -163,4 +175,84 @@ public class UpgradeCatalog251Test {
     Assert.assertEquals(Integer.valueOf(0), captured.getDefaultValue());
     Assert.assertEquals(Short.class, captured.getType());
   }
+
+  @Test
+  public void testExecuteDMLUpdates() throws Exception {
+    Method updateKAFKAConfigs = UpgradeCatalog251.class.getDeclaredMethod("updateKAFKAConfigs");
+
+    UpgradeCatalog251 upgradeCatalog251 = createMockBuilder(UpgradeCatalog251.class)
+        .addMockedMethod(updateKAFKAConfigs)
+        .createMock();
+
+    Field field = AbstractUpgradeCatalog.class.getDeclaredField("dbAccessor");
+    field.set(upgradeCatalog251, dbAccessor);
+
+    upgradeCatalog251.updateKAFKAConfigs();
+    expectLastCall().once();
+
+    replay(upgradeCatalog251, dbAccessor);
+
+    upgradeCatalog251.executeDMLUpdates();
+
+    verify(upgradeCatalog251, dbAccessor);
+  }
+
+
+  @Test
+  public void testUpdateKAFKAConfigs() throws Exception{
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+
+    Map<String, String> initialProperties = Collections.singletonMap("listeners", "PLAINTEXT://localhost:6667,SSL://localhost:6666");
+    Map<String, String> expectedUpdates = Collections.singletonMap("listeners", "PLAINTEXTSASL://localhost:6667,SSL://localhost:6666");
+
+    final Config kafkaBroker = easyMockSupport.createNiceMock(Config.class);
+    expect(kafkaBroker.getProperties()).andReturn(initialProperties).times(1);
+    // Re-entrant test
+    expect(kafkaBroker.getProperties()).andReturn(expectedUpdates).times(1);
+
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+        bind(PasswordEncoder.class).toInstance(createNiceMock(PasswordEncoder.class));
+      }
+    });
+
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).atLeastOnce();
+    expect(mockClusters.getClusters()).andReturn(Collections.singletonMap("normal", mockClusterExpected)).atLeastOnce();
+    expect(mockClusterExpected.getDesiredConfigByType("kafka-broker")).andReturn(kafkaBroker).atLeastOnce();
+    expect(mockClusterExpected.getSecurityType()).andReturn(SecurityType.KERBEROS).atLeastOnce();
+    expect(mockClusterExpected.getServices()).andReturn(Collections.<String, Service>singletonMap("KAFKA", null)).atLeastOnce();
+
+    UpgradeCatalog251 upgradeCatalog251 = createMockBuilder(UpgradeCatalog251.class)
+        .withConstructor(Injector.class)
+        .withArgs(mockInjector)
+        .addMockedMethod("updateConfigurationProperties", String.class,
+            Map.class, boolean.class, boolean.class)
+        .createMock();
+
+
+    // upgradeCatalog251.updateConfigurationProperties is expected to execute only once, since the
+    // second invocation sees data that has already been updated and therefore makes no changes
+    upgradeCatalog251.updateConfigurationProperties("kafka-broker", expectedUpdates, true, false);
+    expectLastCall().once();
+
+    easyMockSupport.replayAll();
+    replay(upgradeCatalog251);
+
+    // Execute the first time... upgrading to Ambari 2.4.0
+    upgradeCatalog251.updateKAFKAConfigs();
+
+    // Test reentry... upgrading from Ambari 2.4.0
+    upgradeCatalog251.updateKAFKAConfigs();
+
+    easyMockSupport.verifyAll();
+  }
 }


[09/25] ambari git commit: AMBARI-21032. HDP 3.0 TP - create service definition for Knox with configs, kerberos, widgets, etc. (vbrodetsky)

Posted by jl...@apache.org.
AMBARI-21032. HDP 3.0 TP - create service definition for Knox with configs, kerberos, widgets, etc. (vbrodetsky)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9adffcf7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9adffcf7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9adffcf7

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 9adffcf7a93d40ad727796a8a1686da0e6408893
Parents: 8141665
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Wed May 17 00:16:45 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Wed May 17 00:16:45 2017 +0300

----------------------------------------------------------------------
 .../common-services/KNOX/0.5.0.3.0/alerts.json  |  32 ++
 .../0.5.0.3.0/configuration/admin-topology.xml  |  97 ++++
 .../0.5.0.3.0/configuration/gateway-log4j.xml   | 110 +++++
 .../0.5.0.3.0/configuration/gateway-site.xml    |  71 +++
 .../KNOX/0.5.0.3.0/configuration/knox-env.xml   |  83 ++++
 .../configuration/knoxsso-topology.xml          | 126 +++++
 .../KNOX/0.5.0.3.0/configuration/ldap-log4j.xml |  93 ++++
 .../configuration/ranger-knox-audit.xml         | 132 ++++++
 .../ranger-knox-plugin-properties.xml           | 132 ++++++
 .../configuration/ranger-knox-policymgr-ssl.xml |  66 +++
 .../configuration/ranger-knox-security.xml      |  64 +++
 .../KNOX/0.5.0.3.0/configuration/topology.xml   | 174 +++++++
 .../KNOX/0.5.0.3.0/configuration/users-ldif.xml | 140 ++++++
 .../KNOX/0.5.0.3.0/kerberos.json                |  81 ++++
 .../common-services/KNOX/0.5.0.3.0/metainfo.xml | 109 +++++
 .../package/files/validateKnoxStatus.py         |  43 ++
 .../KNOX/0.5.0.3.0/package/scripts/knox.py      | 192 ++++++++
 .../0.5.0.3.0/package/scripts/knox_gateway.py   | 220 +++++++++
 .../KNOX/0.5.0.3.0/package/scripts/knox_ldap.py |  59 +++
 .../KNOX/0.5.0.3.0/package/scripts/params.py    |  29 ++
 .../0.5.0.3.0/package/scripts/params_linux.py   | 457 +++++++++++++++++++
 .../0.5.0.3.0/package/scripts/params_windows.py |  71 +++
 .../0.5.0.3.0/package/scripts/service_check.py  |  96 ++++
 .../package/scripts/setup_ranger_knox.py        | 121 +++++
 .../0.5.0.3.0/package/scripts/status_params.py  |  59 +++
 .../KNOX/0.5.0.3.0/package/scripts/upgrade.py   | 118 +++++
 .../package/templates/input.config-knox.json.j2 |  60 +++
 .../package/templates/krb5JAASLogin.conf.j2     |  30 ++
 .../KNOX/0.5.0.3.0/role_command_order.json      |   7 +
 .../stacks/HDP/3.0/services/KNOX/metainfo.xml   |  27 ++
 30 files changed, 3099 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/alerts.json b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/alerts.json
new file mode 100644
index 0000000..4986e04
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/alerts.json
@@ -0,0 +1,32 @@
+{
+  "KNOX": {
+    "service": [],
+    "KNOX_GATEWAY": [
+      {
+        "name": "knox_gateway_process",
+        "label": "Knox Gateway Process",
+        "description": "This host-level alert is triggered if the Knox Gateway cannot be determined to be up.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "PORT",
+          "uri": "{{gateway-site/gateway.port}}",
+          "default_port": 8443,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/admin-topology.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/admin-topology.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/admin-topology.xml
new file mode 100644
index 0000000..3030364
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/admin-topology.xml
@@ -0,0 +1,97 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <!-- topology file -->
+  <property>
+    <name>content</name>
+    <display-name>admin-topology template</display-name>
+    <value>
+    &lt;topology&gt;
+
+        &lt;gateway&gt;
+
+             &lt;provider&gt;
+                &lt;role&gt;authentication&lt;/role&gt;
+                &lt;name&gt;ShiroProvider&lt;/name&gt;
+                &lt;enabled&gt;true&lt;/enabled&gt;
+                &lt;param&gt;
+                    &lt;name&gt;sessionTimeout&lt;/name&gt;
+                    &lt;value&gt;30&lt;/value&gt;
+                &lt;/param&gt;
+                &lt;param&gt;
+                    &lt;name&gt;main.ldapRealm&lt;/name&gt;
+                    &lt;value&gt;org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm&lt;/value&gt;
+                &lt;/param&gt;
+                &lt;param&gt;
+                    &lt;name&gt;main.ldapRealm.userDnTemplate&lt;/name&gt;
+                    &lt;value&gt;uid={0},ou=people,dc=hadoop,dc=apache,dc=org&lt;/value&gt;
+                &lt;/param&gt;
+                &lt;param&gt;
+                    &lt;name&gt;main.ldapRealm.contextFactory.url&lt;/name&gt;
+                    &lt;value&gt;ldap://{{knox_host_name}}:33389&lt;/value&gt;
+                &lt;/param&gt;
+                &lt;param&gt;
+                    &lt;name&gt;main.ldapRealm.contextFactory.authenticationMechanism&lt;/name&gt;
+                    &lt;value&gt;simple&lt;/value&gt;
+                &lt;/param&gt;
+                &lt;param&gt;
+                    &lt;name&gt;urls./**&lt;/name&gt;
+                    &lt;value&gt;authcBasic&lt;/value&gt;
+                &lt;/param&gt;
+            &lt;/provider&gt;
+
+            &lt;provider&gt;
+                &lt;role&gt;authorization&lt;/role&gt;
+                &lt;name&gt;AclsAuthz&lt;/name&gt;
+                &lt;enabled&gt;true&lt;/enabled&gt;
+                &lt;param&gt;
+                    &lt;name&gt;knox.acl&lt;/name&gt;
+                    &lt;value&gt;admin;*;*&lt;/value&gt;
+                &lt;/param&gt;
+            &lt;/provider&gt;
+
+            &lt;provider&gt;
+                &lt;role&gt;identity-assertion&lt;/role&gt;
+                &lt;name&gt;Default&lt;/name&gt;
+                &lt;enabled&gt;true&lt;/enabled&gt;
+            &lt;/provider&gt;
+
+        &lt;/gateway&gt;
+
+        &lt;service&gt;
+            &lt;role&gt;KNOX&lt;/role&gt;
+        &lt;/service&gt;
+
+    &lt;/topology&gt;
+
+    </value>
+    <description>
+        The configuration specifies the Knox admin API configuration and access details. The authentication provider should be configured to match your deployment details.
+    </description>
+    <value-attributes>
+      <type>content</type>
+      <empty-value-valid>true</empty-value-valid>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/gateway-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/gateway-log4j.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/gateway-log4j.xml
new file mode 100644
index 0000000..6408f99
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/gateway-log4j.xml
@@ -0,0 +1,110 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="false">
+   <property>
+    <name>knox_gateway_log_maxfilesize</name>
+    <value>256</value>
+    <description>The maximum size of the log file before it is rotated</description>
+    <display-name>Knox Gateway Log: backup file size</display-name>
+   <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>knox_gateway_log_maxbackupindex</name>
+    <value>20</value>
+    <description>The number of backup files</description>
+    <display-name>Knox Gateway Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>gateway-log4j template</display-name>
+    <value>
+
+      # Licensed to the Apache Software Foundation (ASF) under one
+      # or more contributor license agreements. See the NOTICE file
+      # distributed with this work for additional information
+      # regarding copyright ownership. The ASF licenses this file
+      # to you under the Apache License, Version 2.0 (the
+      # "License"); you may not use this file except in compliance
+      # with the License. You may obtain a copy of the License at
+      #
+      # http://www.apache.org/licenses/LICENSE-2.0
+      #
+      # Unless required by applicable law or agreed to in writing, software
+      # distributed under the License is distributed on an "AS IS" BASIS,
+      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      # See the License for the specific language governing permissions and
+      # limitations under the License.
+
+      app.log.dir=${launcher.dir}/../logs
+      app.log.file=${launcher.name}.log
+      app.audit.file=${launcher.name}-audit.log
+
+      log4j.rootLogger=ERROR, drfa
+
+      log4j.logger.org.apache.hadoop.gateway=INFO
+      #log4j.logger.org.apache.hadoop.gateway=DEBUG
+
+      #log4j.logger.org.eclipse.jetty=DEBUG
+      #log4j.logger.org.apache.shiro=DEBUG
+      #log4j.logger.org.apache.http=DEBUG
+      #log4j.logger.org.apache.http.client=DEBUG
+      #log4j.logger.org.apache.http.headers=DEBUG
+      #log4j.logger.org.apache.http.wire=DEBUG
+
+      log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+      log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+      log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+      log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender
+      log4j.appender.drfa.File=${app.log.dir}/${app.log.file}
+      log4j.appender.drfa.DatePattern=.yyyy-MM-dd
+      log4j.appender.drfa.layout=org.apache.log4j.PatternLayout
+      log4j.appender.drfa.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+      log4j.appender.drfa.MaxFileSize = {{knox_gateway_log_maxfilesize}}MB
+      log4j.appender.drfa.MaxBackupIndex = {{knox_gateway_log_maxbackupindex}}
+
+      log4j.logger.audit=INFO, auditfile
+      log4j.appender.auditfile=org.apache.log4j.DailyRollingFileAppender
+      log4j.appender.auditfile.File=${app.log.dir}/${app.audit.file}
+      log4j.appender.auditfile.Append = true
+      log4j.appender.auditfile.DatePattern = '.'yyyy-MM-dd
+      log4j.appender.auditfile.layout = org.apache.hadoop.gateway.audit.log4j.layout.AuditLayout
+
+    </value>
+    <description>
+      Content of the log4j.properties file for Knox.
+    </description>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
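
The {{knox_gateway_log_maxfilesize}} and {{knox_gateway_log_maxbackupindex}}
tokens above are substituted by Ambari at deploy time; with the defaults
defined in this file, the rendered log4j.properties would contain:

    log4j.appender.drfa.MaxFileSize = 256MB
    log4j.appender.drfa.MaxBackupIndex = 20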

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/gateway-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/gateway-site.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/gateway-site.xml
new file mode 100644
index 0000000..2686dff
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/gateway-site.xml
@@ -0,0 +1,71 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- 
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<!-- The default settings for Knox. -->
+<!-- Edit gateway-site.xml to change settings for your local -->
+<!-- install. -->
+<configuration supports_final="false">
+  <property>
+    <name>gateway.port</name>
+    <value>8443</value>
+    <description>The HTTP port for the Gateway.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>gateway.path</name>
+    <value>gateway</value>
+    <description>The default context path for the gateway.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>gateway.gateway.conf.dir</name>
+    <value>deployments</value>
+    <description>The directory within GATEWAY_HOME that contains gateway topology files and deployments.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>gateway.hadoop.kerberos.secured</name>
+    <value>false</value>
+    <description>Boolean flag indicating whether the Hadoop cluster protected by Gateway is secured with Kerberos</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>java.security.krb5.conf</name>
+    <value>/etc/knox/conf/krb5.conf</value>
+    <description>Absolute path to krb5.conf file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>java.security.auth.login.config</name>
+    <value>/etc/knox/conf/krb5JAASLogin.conf</value>
+    <description>Absolute path to JAAS login config file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>sun.security.krb5.debug</name>
+    <value>false</value>
+    <description>Boolean flag indicating whether to enable debug messages for krb5 authentication</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>gateway.websocket.feature.enabled</name>
+    <value>{{websocket_support}}</value>
+    <description>Enable this if you want WebSocket support</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
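
With the gateway.port and gateway.path values above, requests proxied through
Knox take the following form (host and topology name are placeholders):

    https://knox.example.com:8443/gateway/<topology-name>/<service-path>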

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/knox-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/knox-env.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/knox-env.xml
new file mode 100644
index 0000000..e1ca45a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/knox-env.xml
@@ -0,0 +1,83 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <!-- knox-env.sh -->
+  <property require-input="true">
+    <name>knox_master_secret</name>
+    <value/>
+    <display-name>Knox Master Secret</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>password to use as the master secret</description>
+    <value-attributes>
+      <type>password</type>
+      <editable-only-at-install>true</editable-only-at-install>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>knox_user</name>
+    <display-name>Knox User</display-name>
+    <value>knox</value>
+    <property-type>USER</property-type>
+    <description>Knox Username.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>knox_group</name>
+    <display-name>Knox Group</display-name>
+    <value>knox</value>
+    <property-type>GROUP</property-type>
+    <description>Knox Group.</description>
+    <value-attributes>
+      <type>user</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>knox_pid_dir</name>
+    <value>/var/run/knox</value>
+    <display-name>Knox PID dir</display-name>
+    <description>Knox PID dir.</description>
+    <value-attributes>
+      <type>directory</type>
+      <editable-only-at-install>true</editable-only-at-install>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>knox_principal_name</name>
+    <description>Knox principal name</description>
+    <property-type>KERBEROS_PRINCIPAL</property-type>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>knox_keytab_path</name>
+    <description>Knox keytab path</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
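
knox_master_secret above seeds Knox's credential store. Outside of Ambari, the
equivalent manual step is the Knox CLI, which prompts for the secret (the path
assumes the KNOX_HOME default used elsewhere in this patch):

    /usr/hdp/current/knox-server/bin/knoxcli.sh create-master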

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/knoxsso-topology.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/knoxsso-topology.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/knoxsso-topology.xml
new file mode 100644
index 0000000..1ea8601
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/knoxsso-topology.xml
@@ -0,0 +1,126 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <!-- topology file -->
+    <property>
+        <name>content</name>
+        <display-name>knoxsso-topology template</display-name>
+        <value>
+            &lt;topology&gt;
+            &lt;gateway&gt;
+            &lt;provider&gt;
+            &lt;role&gt;webappsec&lt;/role&gt;
+            &lt;name&gt;WebAppSec&lt;/name&gt;
+            &lt;enabled&gt;true&lt;/enabled&gt;
+            &lt;param&gt;&lt;name&gt;xframe.options.enabled&lt;/name&gt;&lt;value&gt;true&lt;/value&gt;&lt;/param&gt;
+            &lt;/provider&gt;
+
+            &lt;provider&gt;
+            &lt;role&gt;authentication&lt;/role&gt;
+            &lt;name&gt;ShiroProvider&lt;/name&gt;
+            &lt;enabled&gt;true&lt;/enabled&gt;
+            &lt;param&gt;
+            &lt;name&gt;sessionTimeout&lt;/name&gt;
+            &lt;value&gt;30&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;redirectToUrl&lt;/name&gt;
+            &lt;value&gt;/gateway/knoxsso/knoxauth/login.html&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;restrictedCookies&lt;/name&gt;
+            &lt;value&gt;rememberme,WWW-Authenticate&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;main.ldapRealm&lt;/name&gt;
+            &lt;value&gt;org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;main.ldapContextFactory&lt;/name&gt;
+            &lt;value&gt;org.apache.hadoop.gateway.shirorealm.KnoxLdapContextFactory&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;main.ldapRealm.contextFactory&lt;/name&gt;
+            &lt;value&gt;$ldapContextFactory&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;main.ldapRealm.userDnTemplate&lt;/name&gt;
+            &lt;value&gt;uid={0},ou=people,dc=hadoop,dc=apache,dc=org&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;main.ldapRealm.contextFactory.url&lt;/name&gt;
+            &lt;value&gt;ldap://localhost:33389&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;main.ldapRealm.authenticationCachingEnabled&lt;/name&gt;
+            &lt;value&gt;false&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;main.ldapRealm.contextFactory.authenticationMechanism&lt;/name&gt;
+            &lt;value&gt;simple&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;urls./**&lt;/name&gt;
+            &lt;value&gt;authcBasic&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;/provider&gt;
+
+            &lt;provider&gt;
+            &lt;role&gt;identity-assertion&lt;/role&gt;
+            &lt;name&gt;Default&lt;/name&gt;
+            &lt;enabled&gt;true&lt;/enabled&gt;
+            &lt;/provider&gt;
+            &lt;/gateway&gt;
+
+            &lt;application&gt;
+            &lt;name&gt;knoxauth&lt;/name&gt;
+            &lt;/application&gt;
+
+            &lt;service&gt;
+            &lt;role&gt;KNOXSSO&lt;/role&gt;
+            &lt;param&gt;
+            &lt;name&gt;knoxsso.cookie.secure.only&lt;/name&gt;
+            &lt;value&gt;false&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;knoxsso.token.ttl&lt;/name&gt;
+            &lt;value&gt;30000&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;param&gt;
+            &lt;name&gt;knoxsso.redirect.whitelist.regex&lt;/name&gt;
+            &lt;value&gt;^https?:\/\/(localhost|127\.0\.0\.1|0:0:0:0:0:0:0:1|::1):[0-9].*$&lt;/value&gt;
+            &lt;/param&gt;
+            &lt;/service&gt;
+
+            &lt;/topology&gt;
+        </value>
+        <description>
+            This topology configures the KnoxSSO provider integration and its cookie and token management details.
+        </description>
+        <value-attributes>
+            <type>content</type>
+            <empty-value-valid>true</empty-value-valid>
+            <show-property-name>false</show-property-name>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+</configuration>
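
The KNOXSSO service above issues the SSO token through its WebSSO API. An
illustrative request, assuming the topology is deployed under the name
"knoxsso" and using placeholder host and credentials:

    curl -iku admin:admin-password \
      'https://knox.example.com:8443/gateway/knoxsso/api/v1/websso?originalUrl=https://knox.example.com:8443/'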

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ldap-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ldap-log4j.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ldap-log4j.xml
new file mode 100644
index 0000000..57e156c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ldap-log4j.xml
@@ -0,0 +1,93 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="false">
+   <property>
+    <name>knox_ldap_log_maxfilesize</name>
+    <value>256</value>
+    <description>The maximum size of the log file before it is rotated</description>
+    <display-name>Knox LDAP Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>knox_ldap_log_maxbackupindex</name>
+    <value>20</value>
+    <description>The number of backup files</description>
+    <display-name>Knox LDAP Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>ldap-log4j template</display-name>
+    <value>
+        # Licensed to the Apache Software Foundation (ASF) under one
+        # or more contributor license agreements.  See the NOTICE file
+        # distributed with this work for additional information
+        # regarding copyright ownership.  The ASF licenses this file
+        # to you under the Apache License, Version 2.0 (the
+        # "License"); you may not use this file except in compliance
+        # with the License.  You may obtain a copy of the License at
+        #
+        #     http://www.apache.org/licenses/LICENSE-2.0
+        #
+        # Unless required by applicable law or agreed to in writing, software
+        # distributed under the License is distributed on an "AS IS" BASIS,
+        # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+        # See the License for the specific language governing permissions and
+        # limitations under the License.
+
+        app.log.dir=${launcher.dir}/../logs
+        app.log.file=${launcher.name}.log
+
+        log4j.rootLogger=ERROR, drfa
+        log4j.logger.org.apache.directory.server.ldap.LdapServer=INFO
+        log4j.logger.org.apache.directory=WARN
+
+        log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+        log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+        log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+        log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender
+        log4j.appender.drfa.File=${app.log.dir}/${app.log.file}
+        log4j.appender.drfa.DatePattern=.yyyy-MM-dd
+        log4j.appender.drfa.layout=org.apache.log4j.PatternLayout
+        log4j.appender.drfa.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+        log4j.appender.drfa.MaxFileSize = {{knox_ldap_log_maxfilesize}}MB
+        log4j.appender.drfa.MaxBackupIndex = {{knox_ldap_log_maxbackupindex}}
+
+    </value>
+    <description>
+      content for log4j.properties file for the demo LDAP that comes with Knox.
+    </description>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-audit.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-audit.xml
new file mode 100644
index 0000000..f3a0f99
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-audit.xml
@@ -0,0 +1,132 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.audit.is.enabled</name>
+    <value>true</value>
+    <description>Is Audit enabled?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs</name>
+    <value>true</value>
+    <display-name>Audit to HDFS</display-name>
+    <description>Is Audit to HDFS enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.dir</name>
+    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>HDFS folder to write audit to; make sure the service user has the required permissions</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
+    <value>/var/log/knox/audit/hdfs/spool</value>
+    <description>Local spool directory for HDFS audit events</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr</name>
+    <value>false</value>
+    <display-name>Audit to SOLR</display-name>
+    <description>Is Solr audit enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.solr</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.urls</name>
+    <value/>
+    <description>Solr URL</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.urls</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.zookeepers</name>
+    <value>NONE</value>
+    <description>Solr Zookeeper string</description>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.zookeepers</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
+    <value>/var/log/knox/audit/solr/spool</value>
+    <description>Local spool directory for Solr audit events</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.provider.summary.enabled</name>
+    <value>false</value>
+    <display-name>Audit provider summary enabled</display-name>
+    <description>Enable Summary audit?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>ranger.plugin.knox.ambari.cluster.name</name>
+    <value>{{cluster_name}}</value>
+    <description>Captures the name of the cluster where the Ranger Knox plugin is enabled.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>
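
The template tokens above resolve per cluster; illustrative rendered values
(NameNode host and cluster name are placeholders):

    xasecure.audit.destination.hdfs.dir = hdfs://nn1.example.com:8020/ranger/audit
    ranger.plugin.knox.ambari.cluster.name = c1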

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-plugin-properties.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-plugin-properties.xml
new file mode 100644
index 0000000..d8b9d54
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-plugin-properties.xml
@@ -0,0 +1,132 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>policy_user</name>
+    <value>ambari-qa</value>
+    <display-name>Policy user for KNOX</display-name>
+    <description>This user must be a system user and must also exist in the Ranger admin portal</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>common.name.for.certificate</name>
+    <value/>
+    <description>Common name for the certificate; this value should match what is specified in the repository within Ranger admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-knox-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Enable Ranger for KNOX</display-name>
+    <description>Enable the Ranger Knox plugin?</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>ranger-knox-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>admin</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on Ranger admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_PASSWORD</name>
+    <value>admin-password</value>
+    <property-type>PASSWORD</property-type>
+    <display-name>Ranger repository config password</display-name>
+    <description>Used for repository creation on Ranger admin</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>KNOX_HOME</name>
+    <value>/usr/hdp/current/knox-server</value>
+    <display-name>Knox Home</display-name>
+    <description>Knox home folder</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+
+
+  <property>
+    <name>external_admin_username</name>
+    <value></value>
+    <display-name>External Ranger admin username</display-name>
+    <description>Default Ranger admin username, needed when communicating with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_admin_password</name>
+    <value></value>
+    <display-name>External Ranger admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Default Ranger admin password, needed when communicating with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_username</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin username</display-name>
+    <description>Default Ambari admin username on Ranger, needed when communicating with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_password</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Default Ambari admin password on Ranger, needed when communicating with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-policymgr-ssl.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-policymgr-ssl.xml
new file mode 100644
index 0000000..bb0878f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-policymgr-ssl.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>/usr/hdp/current/knox-server/conf/ranger-plugin-keystore.jks</value>
+    <description>Java Keystore files</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <description>password for keystore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>/usr/hdp/current/knox-server/conf/ranger-plugin-truststore.jks</value>
+    <description>java truststore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <description>java truststore password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java keystore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java truststore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
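
Note that "jceks://file" is concatenated directly with {{credential_file}},
which is expected to render as an absolute path, producing a URI such as
(path shown is illustrative):

    jceks://file/etc/ranger/c1_knox/cred.jceks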

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-security.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-security.xml
new file mode 100644
index 0000000..37bda4c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/ranger-knox-security.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>ranger.plugin.knox.service.name</name>
+    <value>{{repo_name}}</value>
+    <description>Name of the Ranger service containing policies for this Knox instance</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.knox.policy.source.impl</name>
+    <value>org.apache.ranger.admin.client.RangerAdminJersey2RESTClient</value>
+    <description>Class to retrieve policies from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.knox.policy.rest.url</name>
+    <value>{{policymgr_mgr_url}}</value>
+    <description>URL to Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>policymgr_external_url</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>ranger.plugin.knox.policy.rest.ssl.config.file</name>
+    <value>/usr/hdp/current/knox-server/conf/ranger-policymgr-ssl.xml</value>
+    <description>Path to the file containing SSL details to contact Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.knox.policy.pollIntervalMs</name>
+    <value>30000</value>
+    <description>How often to poll for changes in policies?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.knox.policy.cache.dir</name>
+    <value>/etc/ranger/{{repo_name}}/policycache</value>
+    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/topology.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/topology.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/topology.xml
new file mode 100644
index 0000000..594ab18
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/topology.xml
@@ -0,0 +1,174 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <!-- topology file -->
+  <property>
+    <name>content</name>
+    <display-name>topology template</display-name>
+    <value>
+        &lt;topology&gt;
+
+            &lt;gateway&gt;
+
+                &lt;provider&gt;
+                    &lt;role&gt;authentication&lt;/role&gt;
+                    &lt;name&gt;ShiroProvider&lt;/name&gt;
+                    &lt;enabled&gt;true&lt;/enabled&gt;
+                    &lt;param&gt;
+                        &lt;name&gt;sessionTimeout&lt;/name&gt;
+                        &lt;value&gt;30&lt;/value&gt;
+                    &lt;/param&gt;
+                    &lt;param&gt;
+                        &lt;name&gt;main.ldapRealm&lt;/name&gt;
+                        &lt;value&gt;org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm&lt;/value&gt;
+                    &lt;/param&gt;
+                    &lt;param&gt;
+                        &lt;name&gt;main.ldapRealm.userDnTemplate&lt;/name&gt;
+                        &lt;value&gt;uid={0},ou=people,dc=hadoop,dc=apache,dc=org&lt;/value&gt;
+                    &lt;/param&gt;
+                    &lt;param&gt;
+                        &lt;name&gt;main.ldapRealm.contextFactory.url&lt;/name&gt;
+                        &lt;value&gt;ldap://{{knox_host_name}}:33389&lt;/value&gt;
+                    &lt;/param&gt;
+                    &lt;param&gt;
+                        &lt;name&gt;main.ldapRealm.contextFactory.authenticationMechanism&lt;/name&gt;
+                        &lt;value&gt;simple&lt;/value&gt;
+                    &lt;/param&gt;
+                    &lt;param&gt;
+                        &lt;name&gt;urls./**&lt;/name&gt;
+                        &lt;value&gt;authcBasic&lt;/value&gt;
+                    &lt;/param&gt;
+                &lt;/provider&gt;
+
+                &lt;provider&gt;
+                    &lt;role&gt;identity-assertion&lt;/role&gt;
+                    &lt;name&gt;Default&lt;/name&gt;
+                    &lt;enabled&gt;true&lt;/enabled&gt;
+                &lt;/provider&gt;
+
+                &lt;provider&gt;
+                    &lt;role&gt;authorization&lt;/role&gt;
+                    &lt;name&gt;AclsAuthz&lt;/name&gt;
+                    &lt;enabled&gt;true&lt;/enabled&gt;
+                &lt;/provider&gt;
+
+            &lt;/gateway&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;NAMENODE&lt;/role&gt;
+                &lt;url&gt;hdfs://{{namenode_host}}:{{namenode_rpc_port}}&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;JOBTRACKER&lt;/role&gt;
+                &lt;url&gt;rpc://{{rm_host}}:{{jt_rpc_port}}&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;WEBHDFS&lt;/role&gt;
+                {{webhdfs_service_urls}}
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;WEBHCAT&lt;/role&gt;
+                &lt;url&gt;http://{{webhcat_server_host}}:{{templeton_port}}/templeton&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;OOZIE&lt;/role&gt;
+                &lt;url&gt;http://{{oozie_server_host}}:{{oozie_server_port}}/oozie&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;WEBHBASE&lt;/role&gt;
+                &lt;url&gt;http://{{hbase_master_host}}:{{hbase_master_port}}&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;HIVE&lt;/role&gt;
+                &lt;url&gt;http://{{hive_server_host}}:{{hive_http_port}}/{{hive_http_path}}&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;RESOURCEMANAGER&lt;/role&gt;
+                &lt;url&gt;http://{{rm_host}}:{{rm_port}}/ws&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;DRUID-COORDINATOR-UI&lt;/role&gt;
+                {{druid_coordinator_urls}}
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;DRUID-COORDINATOR&lt;/role&gt;
+                {{druid_coordinator_urls}}
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;DRUID-OVERLORD-UI&lt;/role&gt;
+                {{druid_overlord_urls}}
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;DRUID-OVERLORD&lt;/role&gt;
+                {{druid_overlord_urls}}
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;DRUID-ROUTER&lt;/role&gt;
+                {{druid_router_urls}}
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;DRUID-BROKER&lt;/role&gt;
+                {{druid_broker_urls}}
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;ZEPPELINUI&lt;/role&gt;
+                {{zeppelin_ui_urls}}
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;ZEPPELINWS&lt;/role&gt;
+                {{zeppelin_ws_urls}}
+            &lt;/service&gt;
+
+        &lt;/topology&gt;
+    </value>
+    <description>
+        This topology specifies the Hadoop cluster services that Knox will provide access to.
+    </description>
+    <value-attributes>
+      <type>content</type>
+      <empty-value-valid>true</empty-value-valid>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-knox-plugin-properties</type>
+        <name>ranger-knox-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
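
Once the topology above is rendered and deployed (typically as the "default"
topology), cluster services are reachable through the gateway. An illustrative
WebHDFS call using the demo LDAP guest account defined later in this patch:

    curl -iku guest:guest-password \
      'https://knox.example.com:8443/gateway/default/webhdfs/v1/tmp?op=LISTSTATUS'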

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/users-ldif.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/users-ldif.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/users-ldif.xml
new file mode 100644
index 0000000..eefa8c9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/configuration/users-ldif.xml
@@ -0,0 +1,140 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>content</name>
+    <display-name>users-ldif template</display-name>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: 1
+
+# Please replace with site specific values
+dn: dc=hadoop,dc=apache,dc=org
+objectclass: organization
+objectclass: dcObject
+o: Hadoop
+dc: hadoop
+
+# Entry for a sample people container
+# Please replace with site specific values
+dn: ou=people,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass:organizationalUnit
+ou: people
+
+# Entry for a sample end user
+# Please replace with site specific values
+dn: uid=guest,ou=people,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass:person
+objectclass:organizationalPerson
+objectclass:inetOrgPerson
+cn: Guest
+sn: User
+uid: guest
+userPassword:guest-password
+
+# entry for sample user admin
+dn: uid=admin,ou=people,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass:person
+objectclass:organizationalPerson
+objectclass:inetOrgPerson
+cn: Admin
+sn: Admin
+uid: admin
+userPassword:admin-password
+
+# entry for sample user sam
+dn: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass:person
+objectclass:organizationalPerson
+objectclass:inetOrgPerson
+cn: sam
+sn: sam
+uid: sam
+userPassword:sam-password
+
+# entry for sample user tom
+dn: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass:person
+objectclass:organizationalPerson
+objectclass:inetOrgPerson
+cn: tom
+sn: tom
+uid: tom
+userPassword:tom-password
+
+# create FIRST Level groups branch
+dn: ou=groups,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass:organizationalUnit
+ou: groups
+description: generic groups branch
+
+# create the analyst group under groups
+dn: cn=analyst,ou=groups,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass: groupofnames
+cn: analyst
+description: analyst group
+member: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org
+member: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org
+
+
+# create the scientist group under groups
+dn: cn=scientist,ou=groups,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass: groupofnames
+cn: scientist
+description: scientist group
+member: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org
+
+        </value>
+    <description>
+      Content of the users.ldif file for the demo LDAP that ships with Knox.
+    </description>
+    <value-attributes>
+      <type>content</type>
+      <empty-value-valid>true</empty-value-valid>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
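
The demo LDAP seeded with the entries above listens on port 33389, matching
the ShiroProvider URLs in the topology templates. An illustrative bind against
it (host is a placeholder):

    ldapsearch -h knox.example.com -p 33389 \
      -D 'uid=guest,ou=people,dc=hadoop,dc=apache,dc=org' -w guest-password \
      -b 'dc=hadoop,dc=apache,dc=org' '(uid=guest)'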

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/kerberos.json
new file mode 100644
index 0000000..2d8aa0d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/kerberos.json
@@ -0,0 +1,81 @@
+{
+  "services": [
+    {
+      "name": "KNOX",
+      "components": [
+        {
+          "name": "KNOX_GATEWAY",
+          "identities": [
+            {
+              "name": "knox_principal",
+              "principal": {
+                "value": "${knox-env/knox_user}/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "knox-env/knox_principal_name",
+                "local_username": "${knox-env/knox_user}"
+
+              },
+              "keytab": {
+                "file": "${keytab_dir}/knox.service.keytab",
+                "owner": {
+                  "name": "${knox-env/knox_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "knox-env/knox_keytab_path"
+              }
+            },
+            {
+              "name": "/KNOX/KNOX_GATEWAY/knox_principal",
+              "principal": {
+                "configuration": "ranger-knox-audit/xasecure.audit.jaas.Client.option.principal"                
+              },
+              "keytab": {
+                "configuration": "ranger-knox-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "gateway-site": {
+                "gateway.hadoop.kerberos.secured": "true",
+                "java.security.krb5.conf": "/etc/krb5.conf"
+              }
+            },
+            {
+              "core-site": {
+                "hadoop.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
+                "hadoop.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
+              }
+            },
+            {
+              "webhcat-site": {
+                "webhcat.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
+                "webhcat.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
+              }
+            },
+            {
+              "oozie-site": {
+                "oozie.service.ProxyUserService.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
+                "oozie.service.ProxyUserService.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
+              }
+            },
+            {
+              "ranger-knox-audit": {
+                "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+                "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+                "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+                "xasecure.audit.jaas.Client.option.storeKey": "false",
+                "xasecure.audit.jaas.Client.option.serviceName": "solr",
+                "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
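
The ${...} references in the descriptor above are resolved by Ambari's
Kerberos framework. With knox_user=knox, the stock keytab directory, and an
illustrative host and realm, the principal and keytab resolve to:

    knox/c6401.ambari.apache.org@EXAMPLE.COM
    /etc/security/keytabs/knox.service.keytab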

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/metainfo.xml
new file mode 100644
index 0000000..8954d0d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/metainfo.xml
@@ -0,0 +1,109 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KNOX</name>
+      <displayName>Knox</displayName>
+      <comment>Provides a single point of authentication and access for Apache Hadoop services in a cluster</comment>
+      <version>0.5.0.3.0</version>
+      <components>
+        <component>
+          <name>KNOX_GATEWAY</name>
+          <displayName>Knox Gateway</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/knox_gateway.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>knox_gateway</logId>
+              <primary>true</primary>
+            </log>
+            <log>
+              <logId>knox_cli</logId>
+            </log>
+            <log>
+              <logId>knox_ldap</logId>
+            </log>
+          </logs>
+          <customCommands>
+            <customCommand>
+              <name>STARTDEMOLDAP</name>
+              <commandScript>
+                <script>scripts/knox_gateway.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+            <customCommand>
+              <name>STOPDEMOLDAP</name>
+              <commandScript>
+                <script>scripts/knox_gateway.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>knox_${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>knox-${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>gateway-site</config-type>
+        <config-type>gateway-log4j</config-type>
+        <config-type>topology</config-type>
+        <config-type>admin-topology</config-type>
+        <config-type>knoxsso-topology</config-type>
+        <config-type>ranger-knox-plugin-properties</config-type>
+        <config-type>ranger-knox-audit</config-type>
+        <config-type>ranger-knox-policymgr-ssl</config-type>
+        <config-type>ranger-knox-security</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/files/validateKnoxStatus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/files/validateKnoxStatus.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/files/validateKnoxStatus.py
new file mode 100644
index 0000000..257abfb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/files/validateKnoxStatus.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import optparse
+import socket
+
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options]")
+  parser.add_option("-p", "--port", dest="port", help="Port for Knox process")
+  parser.add_option("-n", "--hostname", dest="hostname", help="Hostname of Knox Gateway component")
+
+  (options, args) = parser.parse_args()
+  timeout_seconds = 5
+  try:
+    s = socket.create_connection((options.hostname, int(options.port)),timeout=timeout_seconds)
+    print "Successfully connected to %s on port %s" % (options.hostname, options.port)
+    s.close()
+  except socket.error, e:
+    print "Connection to %s on port %s failed: %s" % (options.hostname, options.port, e)
+    exit(1)
+
+if __name__ == "__main__":
+  main()
+
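
For manual verification, the status script above can be pointed at a running
gateway; a hypothetical invocation (hostname and port are placeholders):

  python validateKnoxStatus.py --hostname gw1.example.com --port 8443

On success it prints "Successfully connected to ..." and exits normally; on
failure it prints the socket error and exits with status 1.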

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox.py
new file mode 100644
index 0000000..34b5643
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox.py
@@ -0,0 +1,192 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.get_config import get_config
+from resource_management.libraries.resources.template_config import TemplateConfig
+from resource_management.core.resources.system import File, Execute, Directory
+from resource_management.core.shell import as_user
+from resource_management.core.source import InlineTemplate
+
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import StackFeature
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def knox():
+  import params
+
+  XmlConfig("gateway-site.xml",
+            conf_dir=params.knox_conf_dir,
+            configurations=params.config['configurations']['gateway-site'],
+            configuration_attributes=params.config['configuration_attributes']['gateway-site'],
+            owner=params.knox_user
+  )
+
+  # Manually overriding service logon user & password set by the installation package
+  ServiceConfig(params.knox_gateway_win_service_name,
+                action="change_user",
+                username = params.knox_user,
+                password = Script.get_password(params.knox_user))
+
+  File(os.path.join(params.knox_conf_dir, "gateway-log4j.properties"),
+       owner=params.knox_user,
+       content=params.gateway_log4j
+  )
+
+  File(os.path.join(params.knox_conf_dir, "topologies", "default.xml"),
+       group=params.knox_group,
+       owner=params.knox_user,
+       content=InlineTemplate(params.topology_template)
+  )
+
+  if params.admin_topology_template:
+    File(os.path.join(params.knox_conf_dir, "topologies", "admin.xml"),
+       group=params.knox_group,
+       owner=params.knox_user,
+       content=InlineTemplate(params.admin_topology_template)
+    )
+
+  if params.version_formatted and check_stack_feature(StackFeature.KNOX_SSO_TOPOLOGY, params.version_formatted):
+    knoxsso_topology_template_content = get_config("knoxsso-topology")
+    if knoxsso_topology_template_content:
+      File(os.path.join(params.knox_conf_dir, "topologies", "knoxsso.xml"),
+        group=params.knox_group,
+        owner=params.knox_user,
+        content=InlineTemplate(params.knoxsso_topology_template)
+      )
+
+  if params.security_enabled:
+    TemplateConfig( os.path.join(params.knox_conf_dir, "krb5JAASLogin.conf"),
+        owner = params.knox_user,
+        template_tag = None
+    )
+
+  if not os.path.isfile(params.knox_master_secret_path):
+    cmd = format('cmd /C {knox_client_bin} create-master --master {knox_master_secret!p}')
+    Execute(cmd)
+    cmd = format('cmd /C {knox_client_bin} create-cert --hostname {knox_host_name_in_cluster}')
+    Execute(cmd)
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def knox():
+    import params
+    Directory([params.knox_data_dir, params.knox_logs_dir, params.knox_pid_dir, params.knox_conf_dir, os.path.join(params.knox_conf_dir, "topologies")],
+              owner = params.knox_user,
+              group = params.knox_group,
+              create_parents = True,
+              cd_access = "a",
+              mode = 0755,
+              recursive_ownership = True,
+    )
+
+    XmlConfig("gateway-site.xml",
+              conf_dir=params.knox_conf_dir,
+              configurations=params.config['configurations']['gateway-site'],
+              configuration_attributes=params.config['configuration_attributes']['gateway-site'],
+              owner=params.knox_user,
+              group=params.knox_group,
+    )
+
+    File(format("{params.knox_conf_dir}/gateway-log4j.properties"),
+         mode=0644,
+         group=params.knox_group,
+         owner=params.knox_user,
+         content=InlineTemplate(params.gateway_log4j)
+    )
+
+    File(format("{params.knox_conf_dir}/topologies/default.xml"),
+         group=params.knox_group,
+         owner=params.knox_user,
+         content=InlineTemplate(params.topology_template)
+    )
+
+    if params.admin_topology_template:
+      File(format("{params.knox_conf_dir}/topologies/admin.xml"),
+           group=params.knox_group,
+           owner=params.knox_user,
+           content=InlineTemplate(params.admin_topology_template)
+      )
+
+    if params.version_formatted and check_stack_feature(StackFeature.KNOX_SSO_TOPOLOGY, params.version_formatted):
+      knoxsso_topology_template_content = get_config("knoxsso-topology")
+      if knoxsso_topology_template_content:
+        File(os.path.join(params.knox_conf_dir, "topologies", "knoxsso.xml"),
+            group=params.knox_group,
+            owner=params.knox_user,
+            content=InlineTemplate(params.knoxsso_topology_template)
+        )
+
+    if params.security_enabled:
+      TemplateConfig( format("{knox_conf_dir}/krb5JAASLogin.conf"),
+                      owner = params.knox_user,
+                      template_tag = None
+      )
+
+    cmd = format('{knox_client_bin} create-master --master {knox_master_secret!p}')
+    master_secret_exist = as_user(format('test -f {knox_master_secret_path}'), params.knox_user)
+
+    Execute(cmd,
+            user=params.knox_user,
+            environment={'JAVA_HOME': params.java_home},
+            not_if=master_secret_exist,
+    )
+
+    cmd = format('{knox_client_bin} create-cert --hostname {knox_host_name_in_cluster}')
+    cert_store_exist = as_user(format('test -f {knox_cert_store_path}'), params.knox_user)
+
+    Execute(cmd,
+            user=params.knox_user,
+            environment={'JAVA_HOME': params.java_home},
+            not_if=cert_store_exist,
+    )
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def update_knox_folder_permissions():
+  import params
+  Directory(params.knox_logs_dir,
+            owner = params.knox_user,
+            group = params.knox_group
+            )
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def update_knox_logfolder_permissions():
+  """
+   Fix for a bug in the rpm/deb packages: during installation they re-apply permissions to the
+   folders below, which breaks installations that use a non-standard user name/group and puts
+   the cluster in a non-working state.
+  """
+  import params
+  
+  Directory(params.knox_logs_dir,
+            owner = params.knox_user,
+            group = params.knox_group,
+            create_parents = True,
+            cd_access = "a",
+            mode = 0755,
+            recursive_ownership = True,
+  )
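
The Execute(..., not_if=...) guards above are what make the master-secret and
certificate creation idempotent across restarts. A minimal standalone sketch of
the same guard pattern, assuming hypothetical paths and commands (this is not
the Ambari resource_management API):

  import os
  import subprocess

  def run_unless(marker_path, cmd):
      # Skip the command when its output artifact already exists, mirroring
      # Execute(cmd, not_if='test -f {marker}') in the resource above.
      if not os.path.isfile(marker_path):
          subprocess.check_call(cmd)

  run_unless('/var/lib/knox/data/security/master',                   # hypothetical path
             ['knoxcli.sh', 'create-master', '--master', 'secret'])  # hypothetical command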

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adffcf7/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox_gateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox_gateway.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox_gateway.py
new file mode 100644
index 0000000..8996d23
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.3.0/package/scripts/knox_gateway.py
@@ -0,0 +1,220 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.constants import Direction
+from resource_management.libraries.functions.security_commons import build_expectations
+from resource_management.libraries.functions.security_commons import cached_kinit_executor
+from resource_management.libraries.functions.security_commons import validate_security_config_properties
+from resource_management.libraries.functions.security_commons import get_params_from_filesystem
+from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
+from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.core.resources.system import File, Execute, Link
+from resource_management.core.resources.service import Service
+from resource_management.core.logger import Logger
+
+
+from ambari_commons import OSConst, OSCheck
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+if OSCheck.is_windows_family():
+  from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+
+import upgrade
+from knox import knox, update_knox_logfolder_permissions
+from knox_ldap import ldap
+from setup_ranger_knox import setup_ranger_knox
+
+
+class KnoxGateway(Script):
+  def get_component_name(self):
+    return "knox-server"
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+
+    File(os.path.join(params.knox_conf_dir, 'topologies', 'sandbox.xml'),
+         action = "delete",
+    )
+
+  def configure(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    knox()
+    ldap()
+
+  def configureldap(self, env):
+    import params
+    env.set_params(params)
+    ldap()
+
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class KnoxGatewayWindows(KnoxGateway):
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    # setup_ranger_knox(env)
+    Service(params.knox_gateway_win_service_name, action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    Service(params.knox_gateway_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_windows_service_status(status_params.knox_gateway_win_service_name)
+
+  def startdemoldap(self, env):
+    import params
+    env.set_params(params)
+    self.configureldap(env)
+    Service(params.knox_ldap_win_service_name, action="start")
+
+  def stopdemoldap(self, env):
+    import params
+    env.set_params(params)
+    Service(params.knox_ldap_win_service_name, action="stop")
+
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class KnoxGatewayDefault(KnoxGateway):
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    # back up the data directory to /tmp/knox-upgrade-backup/knox-data-backup.tar as a safety
+    # net; Knox re-generates missing files such as keystores, which can cause side effects
+    # if the upgrade goes wrong
+    if params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
+      absolute_backup_dir = upgrade.backup_data()
+      Logger.info("Knox data was successfully backed up to {0}".format(absolute_backup_dir))
+
+    # <conf-selector-tool> will change the symlink to the conf folder.
+    conf_select.select(params.stack_name, "knox", params.version)
+    stack_select.select("knox-server", params.version)
+
+    # seed the new Knox data directory with the keystores of yesteryear
+    if params.upgrade_direction == Direction.UPGRADE:
+      upgrade.seed_current_data_directory()
+
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    daemon_cmd = format('{knox_bin} start')
+    no_op_test = format('ls {knox_pid_file} >/dev/null 2>&1 && ps -p `cat {knox_pid_file}` >/dev/null 2>&1')
+    setup_ranger_knox(upgrade_type=upgrade_type)
+    # Update the Knox-managed pid symlink, which is needed when custom pid locations are used
+    if os.path.islink(params.knox_managed_pid_symlink):
+      Link(params.knox_managed_pid_symlink,
+           to = params.knox_pid_dir,
+      )
+
+    update_knox_logfolder_permissions()
+
+    try:
+      Execute(daemon_cmd,
+              user=params.knox_user,
+              environment={'JAVA_HOME': params.java_home},
+              not_if=no_op_test
+      )
+    except:
+      show_logs(params.knox_logs_dir, params.knox_user)
+      raise
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    daemon_cmd = format('{knox_bin} stop')
+
+    update_knox_logfolder_permissions()
+
+    try:
+      Execute(daemon_cmd,
+              environment={'JAVA_HOME': params.java_home},
+              user=params.knox_user,
+      )
+    except:
+      show_logs(params.knox_logs_dir, params.knox_user)
+      raise
+    
+    File(params.knox_pid_file,
+         action="delete",
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.knox_pid_file)
+
+  def startdemoldap(self, env):
+    import params
+    env.set_params(params)
+    self.configureldap(env)
+    daemon_cmd = format('{ldap_bin} start')
+    no_op_test = format('ls {ldap_pid_file} >/dev/null 2>&1 && ps -p `cat {ldap_pid_file}` >/dev/null 2>&1')
+    Execute(daemon_cmd,
+            user=params.knox_user,
+            environment={'JAVA_HOME': params.java_home},
+            not_if=no_op_test
+    )
+
+  def stopdemoldap(self, env):
+    import params
+    env.set_params(params)
+    self.configureldap(env)
+    daemon_cmd = format('{ldap_bin} stop')
+    Execute(daemon_cmd,
+            environment={'JAVA_HOME': params.java_home},
+            user=params.knox_user,
+            )
+    File(params.ldap_pid_file,
+      action = "delete"
+    )
+      
+  def get_log_folder(self):
+    import params
+    return params.knox_logs_dir
+  
+  def get_user(self):
+    import params
+    return params.knox_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.knox_pid_file]
+
+
+if __name__ == "__main__":
+  KnoxGateway().execute()
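
The no_op_test used by start() above is a conventional pid-file liveness probe
(ls the pid file, then ps -p its contents). An equivalent standalone sketch in
Python, with the pid file path as a placeholder:

  import os

  def is_running(pid_file):
      # True when the pid file exists and the recorded process is alive.
      if not os.path.isfile(pid_file):
          return False
      with open(pid_file) as f:
          pid = int(f.read().strip())
      try:
          os.kill(pid, 0)   # signal 0 probes process existence without killing it
          return True
      except OSError:
          return False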


[04/25] ambari git commit: AMBARI-21030. Custom task script should output specific exception (dlysnichenko)

Posted by jl...@apache.org.
AMBARI-21030. Custom task script should output specific exception (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/47b845fb
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/47b845fb
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/47b845fb

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 47b845fb9acf2cdfe196731f05ec1395142a1fb5
Parents: 8cebc18
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Tue May 16 18:45:48 2017 +0300
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Tue May 16 18:46:27 2017 +0300

----------------------------------------------------------------------
 .../src/main/resources/custom_actions/scripts/ru_execute_tasks.py  | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/47b845fb/ambari-server/src/main/resources/custom_actions/scripts/ru_execute_tasks.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/custom_actions/scripts/ru_execute_tasks.py b/ambari-server/src/main/resources/custom_actions/scripts/ru_execute_tasks.py
index 4dec16f..c0f0d41 100644
--- a/ambari-server/src/main/resources/custom_actions/scripts/ru_execute_tasks.py
+++ b/ambari-server/src/main/resources/custom_actions/scripts/ru_execute_tasks.py
@@ -23,6 +23,7 @@ import re
 import os
 import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
 import socket
+import traceback
 
 from resource_management.libraries.script import Script
 from resource_management.libraries.functions.default import default
@@ -79,6 +80,7 @@ def resolve_ambari_config():
     else:
       raise Exception("No config found at %s" % str(config_path))
   except Exception, err:
+    traceback.print_exc()
     Logger.warning(err)
 
 
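
The pattern of the change is simply to dump the full stack trace before
downgrading the error to a warning. A minimal sketch of the same idea (the
failing call and path are illustrative, not from the commit):

  import traceback

  def resolve_config(path):
      raise Exception("No config found at %s" % path)

  try:
      resolve_config("/etc/ambari-agent/conf/ambari-agent.ini")
  except Exception, err:            # Python 2 syntax, matching the script above
      traceback.print_exc()         # full stack trace to stderr for debugging
      print "WARNING: %s" % err     # the short message still reaches the log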


[11/25] ambari git commit: AMBARI-21012. Fix Livy service check and alerts script with SSL enabled (Saisai Shao via smohanty)

Posted by jl...@apache.org.
AMBARI-21012. Fix Livy service check and alerts script with SSL enabled (Saisai Shao via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/350dace6
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/350dace6
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/350dace6

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 350dace643e3edad2407c80f38edad5847b47706
Parents: 704eb52
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Tue May 16 19:20:24 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Tue May 16 19:20:24 2017 -0700

----------------------------------------------------------------------
 .../1.2.1/package/scripts/alerts/alert_spark_livy_port.py      | 6 ++++--
 .../common-services/SPARK/1.2.1/package/scripts/params.py      | 1 +
 .../SPARK/1.2.1/package/scripts/service_check.py               | 2 +-
 .../2.0.0/package/scripts/alerts/alert_spark2_livy_port.py     | 6 ++++--
 .../common-services/SPARK2/2.0.0/package/scripts/params.py     | 1 +
 .../SPARK2/2.0.0/package/scripts/service_check.py              | 2 +-
 6 files changed, 12 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/350dace6/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_livy_port.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_livy_port.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_livy_port.py
index f15e747..f3a63b2 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_livy_port.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_livy_port.py
@@ -46,6 +46,7 @@ SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
 SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
 SMOKEUSER_PRINCIPAL_KEY = '{{cluster-env/smokeuser_principal_name}}'
 SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+LIVY_SSL_ENABLED_KEY = '{{livy-conf/livy.keystore}}'
 
 # The configured Kerberos executable search paths, if any
 KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
@@ -57,7 +58,7 @@ def get_tokens():
     Returns a tuple of tokens in the format {{site/property}} that will be used
     to build the dictionary passed into execute
     """
-    return (LIVY_SERVER_PORT_KEY,LIVYUSER_DEFAULT,SECURITY_ENABLED_KEY,SMOKEUSER_KEYTAB_KEY,SMOKEUSER_PRINCIPAL_KEY,SMOKEUSER_KEY)
+    return (LIVY_SERVER_PORT_KEY,LIVYUSER_DEFAULT,SECURITY_ENABLED_KEY,SMOKEUSER_KEYTAB_KEY,SMOKEUSER_PRINCIPAL_KEY,SMOKEUSER_KEY,LIVY_SSL_ENABLED_KEY)
 
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
 def execute(configurations={}, parameters={}, host_name=None):
@@ -118,13 +119,14 @@ def execute(configurations={}, parameters={}, host_name=None):
         finally:
             kinit_lock.release()
 
+    http_scheme = 'https' if LIVY_SSL_ENABLED_KEY in configurations else 'http'
     result_code = None
     try:
         start_time = time.time()
         try:
             livy_livyserver_host = str(host_name)
 
-            livy_cmd = format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k http://{livy_livyserver_host}:{port}/sessions | grep 200 ")
+            livy_cmd = format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {http_scheme}://{livy_livyserver_host}:{port}/sessions | grep 200 ")
 
             Execute(livy_cmd,
                     tries=3,

http://git-wip-us.apache.org/repos/asf/ambari/blob/350dace6/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
index 42396bd..74fd76a 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/params.py
@@ -242,6 +242,7 @@ if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY, stac
     livy_kerberos_principal = config['configurations']['livy-conf']['livy.server.launch.kerberos.principal']
 
   livy_livyserver_hosts = default("/clusterHostInfo/livy_server_hosts", [])
+  livy_http_scheme = 'https' if 'livy.keystore' in config['configurations']['livy-conf'] else 'http'
 
   # ats 1.5 properties
   entity_groupfs_active_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.active-dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/350dace6/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/service_check.py
index 9d74779..4699b2e 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/service_check.py
@@ -43,7 +43,7 @@ class SparkServiceCheck(Script):
       live_livyserver_host = "";
       for livyserver_host in params.livy_livyserver_hosts:
         try:
-          Execute(format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k http://{livyserver_host}:{livy_livyserver_port}/sessions | grep 200"),
+          Execute(format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {livy_http_scheme}://{livyserver_host}:{livy_livyserver_port}/sessions | grep 200"),
               tries=3,
               try_sleep=1,
               logoutput=True,

http://git-wip-us.apache.org/repos/asf/ambari/blob/350dace6/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_livy_port.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_livy_port.py b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_livy_port.py
index d69f663..525ac3c 100644
--- a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_livy_port.py
+++ b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_livy_port.py
@@ -46,6 +46,7 @@ SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
 SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
 SMOKEUSER_PRINCIPAL_KEY = '{{cluster-env/smokeuser_principal_name}}'
 SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+LIVY_SSL_ENABLED_KEY = '{{livy2-conf/livy.keystore}}'
 
 # The configured Kerberos executable search paths, if any
 KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
@@ -57,7 +58,7 @@ def get_tokens():
     Returns a tuple of tokens in the format {{site/property}} that will be used
     to build the dictionary passed into execute
     """
-    return (LIVY_SERVER_PORT_KEY,LIVYUSER_DEFAULT,SECURITY_ENABLED_KEY,SMOKEUSER_KEYTAB_KEY,SMOKEUSER_PRINCIPAL_KEY,SMOKEUSER_KEY)
+    return (LIVY_SERVER_PORT_KEY,LIVYUSER_DEFAULT,SECURITY_ENABLED_KEY,SMOKEUSER_KEYTAB_KEY,SMOKEUSER_PRINCIPAL_KEY,SMOKEUSER_KEY,LIVY_SSL_ENABLED_KEY)
 
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
 def execute(configurations={}, parameters={}, host_name=None):
@@ -118,13 +119,14 @@ def execute(configurations={}, parameters={}, host_name=None):
         finally:
             kinit_lock.release()
 
+    http_scheme = 'https' if LIVY_SSL_ENABLED_KEY in configurations else 'http'
     result_code = None
     try:
         start_time = time.time()
         try:
             livy2_livyserver_host = str(host_name)
 
-            livy_cmd = format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k http://{livy2_livyserver_host}:{port}/sessions | grep 200 ")
+            livy_cmd = format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {http_scheme}://{livy2_livyserver_host}:{port}/sessions | grep 200 ")
 
             Execute(livy_cmd,
                     tries=3,

http://git-wip-us.apache.org/repos/asf/ambari/blob/350dace6/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/params.py
index 1df3f2f..74790ef 100755
--- a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/params.py
@@ -229,6 +229,7 @@ if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY2, sta
     livy_kerberos_principal = config['configurations']['livy2-conf']['livy.server.launch.kerberos.principal']
 
   livy2_livyserver_hosts = default("/clusterHostInfo/livy2_server_hosts", [])
+  livy2_http_scheme = 'https' if 'livy.keystore' in config['configurations']['livy2-conf'] else 'http'
 
   # ats 1.5 properties
   entity_groupfs_active_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.active-dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/350dace6/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/service_check.py
index 8e7a766..7667191 100755
--- a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/service_check.py
@@ -44,7 +44,7 @@ class SparkServiceCheck(Script):
       live_livyserver_host = ""
       for livyserver_host in params.livy2_livyserver_hosts:
         try:
-          Execute(format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k http://{livyserver_host}:{livy2_livyserver_port}/sessions | grep 200"),
+          Execute(format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {livy2_http_scheme}://{livyserver_host}:{livy2_livyserver_port}/sessions | grep 200"),
                   tries=3,
                   try_sleep=1,
                   logoutput=True,
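
The scheme selection introduced in both params.py files reduces to a dictionary
membership test on livy.keystore. A minimal sketch with an illustrative config
dict (host and port are placeholders):

  livy_conf = {'livy.keystore': '/etc/livy/conf/keystore.jks'}   # illustrative
  http_scheme = 'https' if 'livy.keystore' in livy_conf else 'http'
  check_url = '%s://%s:%d/sessions' % (http_scheme, 'livy-host.example.com', 8999)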


[16/25] ambari git commit: AMBARI-20698. Ability to export blueprint via Ambari installer UI (alexantonenko)

Posted by jl...@apache.org.
AMBARI-20698. Ability to export blueprint via Ambari installer UI (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4427a336
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4427a336
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4427a336

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 4427a3365732dd971dc294f4feda75e247be4605
Parents: 99d4ca1
Author: Alex Antonenko <hi...@gmail.com>
Authored: Wed May 17 15:32:05 2017 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Wed May 17 15:51:50 2017 +0300

----------------------------------------------------------------------
 .../app/controllers/wizard/step8_controller.js  | 167 ++++++++++++++++++-
 ambari-web/app/messages.js                      |   1 +
 ambari-web/app/templates/wizard/step8.hbs       |   5 +
 .../test/controllers/wizard/step8_test.js       | 132 +++++++++------
 4 files changed, 254 insertions(+), 51 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4427a336/ambari-web/app/controllers/wizard/step8_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step8_controller.js b/ambari-web/app/controllers/wizard/step8_controller.js
index 4678d03..7e318e0 100644
--- a/ambari-web/app/controllers/wizard/step8_controller.js
+++ b/ambari-web/app/controllers/wizard/step8_controller.js
@@ -18,6 +18,7 @@
 
 var App = require('app');
 var stringUtils = require('utils/string_utils');
+var fileUtils = require('utils/file_utils');
 
 App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, App.wizardDeployProgressControllerMixin, App.ConfigOverridable, App.ConfigsSaverMixin, {
 
@@ -1764,7 +1765,171 @@ App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, App.wiz
     });
   },
 
+  getComponentsForHost: function(host) {
+    if(!host.hostComponents) {
+      App.router.get('installerController').get('allHosts');
+    }
+    var componentNameDetail = [];
+    host.hostComponents.mapProperty('componentName').forEach(function (componentName) {
+      if(componentName === 'CLIENT') {
+        this.get('content.clients').mapProperty('component_name').forEach(function (clientComponent) {
+          componentNameDetail.push({ name : clientComponent });
+        }, this);
+        return;
+      }
+      componentNameDetail.push({ name : componentName });
+    }, this);
+    return componentNameDetail;
+  },
+
+  getPropertyAttributesForConfigType : function(configs) {
+    //Currently only looks for final properties, if any
+    var finalProperties = configs.filterProperty('isFinal', 'true');
+    var propertyAttributes = {};
+    finalProperties.forEach(function (finalProperty) {
+      propertyAttributes[finalProperty['name']] = "true";
+    });
+    var finalPropertyMap = {};
+    if (!App.isEmptyObject(finalProperties)) {
+      finalPropertyMap = {
+        "isFinal": propertyAttributes
+      };
+    }
+    return finalPropertyMap;
+  },
+
+  getConfigurationDetailsForConfigType: function(configs) {
+    var configDetails = {};
+    var self = this;
+    configs.forEach(function (propertyObj) {
+      configDetails[propertyObj['name']] = propertyObj['value'];
+    }, this);
+    var configurationsDetails = {
+      "properties_attributes": self.getPropertyAttributesForConfigType(configs),
+      "properties": configDetails
+    };
+    return configurationsDetails;
+  },
+
+  hostInExistingHostGroup: function (newHost, cluster_template_host_groups) {
+    var hostGroupMatched = false;
+    cluster_template_host_groups.some(function (existingHostGroup) {
+      if (!hostGroupMatched) {
+        var fqdnInHostGroup = existingHostGroup.hosts[0].fqdn;
+        var componentsInExistingHostGroup = this.getRegisteredHosts().filterProperty('hostName', fqdnInHostGroup)[0].hostComponents;
+        if (componentsInExistingHostGroup.length !== newHost.hostComponents.length) {
+          return;
+        } else {
+          var componentMismatch = false;
+          componentsInExistingHostGroup.forEach(function (componentInExistingHostGroup, index) {
+            if (!componentMismatch) {
+              if (!newHost.hostComponents.mapProperty('componentName').includes(componentInExistingHostGroup.componentName)) {
+                componentMismatch = true;
+              }
+            }
+          });
+          if (!componentMismatch) {
+            hostGroupMatched = true;
+            existingHostGroup["cardinality"]["cardinality"] = parseInt(existingHostGroup["cardinality"]["cardinality"]) + 1;
+            existingHostGroup.hosts.push({"fqdn": newHost.hostName});
+            return true;
+          }
+        }
+      }
+    }, this);
+    return hostGroupMatched;
+  },
+
+  generateBlueprint: function () {
+    console.log("Prepare blueprint for download...");
+    var blueprint = {};
+    var self = this;
+    //service configurations
+    var totalConf = [];
+    //Add cluster-env
+    var clusterEnv = this.get('configs').filterProperty('filename', 'cluster-env.xml');
+    var configurations = {};
+    configurations["cluster-env"] = self.getConfigurationDetailsForConfigType(clusterEnv);
+    totalConf.push(configurations);
+    //Add configurations for selected services
+    this.get('selectedServices').forEach(function (service) {
+      Object.keys(service.get('configTypes')).forEach(function (type) {
+        if (!this.get('serviceConfigTags').someProperty('type', type)) {
+          var configs = this.get('configs').filterProperty('filename', App.config.getOriginalFileName(type));
+          var configurations = {};
+          configurations[type] = self.getConfigurationDetailsForConfigType(configs);
+          totalConf.push(configurations);
+        }
+      }, this);
+    }, this);
+
+    //TODO address configGroups
+    var host_groups = [];
+    var cluster_template_host_groups = [];
+    var counter = 0;
+
+    this.getRegisteredHosts().filterProperty('isInstalled', false).map(function (host) {
+      var clusterTemplateHostGroupDetail = {};
+      if(self.hostInExistingHostGroup(host, cluster_template_host_groups)) {
+        return;
+      }
+
+      var hostGroupId = "host_group_" + counter;
+      var cardinality = {"cardinality": 1};
+      var hostGroupDetail = {
+        "name": hostGroupId,
+        "components": self.getComponentsForHost(host),
+        cardinality
+      };
+      hostGroupDetail.toJSON = function () {
+        var hostGroupDetailResult = {};
+        for (var x in this) {
+          if (x === "cardinality") {
+            hostGroupDetailResult[x] = (this[x]["cardinality"]).toString();
+          } else {
+            hostGroupDetailResult[x] = this[x];
+          }
+        }
+        return hostGroupDetailResult;
+      };
+      host_groups.push(hostGroupDetail);
+      var hostListForGroup = [];
+      var hostDetail = {
+        "fqdn": host.hostName
+      }
+      hostListForGroup.push(hostDetail);
+      clusterTemplateHostGroupDetail = {
+        "name": hostGroupId,
+        "hosts": hostListForGroup,
+        cardinality
+      };
+      clusterTemplateHostGroupDetail.toJSON = function () {
+        return _.omit(this, [ "cardinality" ]);
+      };
+
+      cluster_template_host_groups.push(clusterTemplateHostGroupDetail);
+      counter++;
+    }, this);
+
+    var selectedStack = App.Stack.find().findProperty('isSelected', true);
+    blueprint = { //TODO: bp name
+        'configurations':totalConf,
+        'host_groups':host_groups,
+        'Blueprints':{'stack_name':selectedStack.get('stackName'), 'stack_version':selectedStack.get('stackVersion')}
+    };
+    fileUtils.downloadTextFile(JSON.stringify(blueprint), 'json', 'blueprint.json')
+
+    var cluster_template = {
+      "blueprint": App.clusterStatus.clusterName,
+      "config_recommendation_strategy" : "NEVER_APPLY",
+      "provision_action" : "INSTALL_AND_START",
+      "configurations":[],
+      "host_groups":cluster_template_host_groups
+    };
+    fileUtils.downloadTextFile(JSON.stringify(cluster_template), 'json', 'clustertemplate.json')
+  },
+
   downloadCSV: function() {
     App.router.get('kerberosWizardStep5Controller').getCSVData(false);
   }
-});
+});
\ No newline at end of file
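
For orientation, generateBlueprint() above writes two files. A hypothetical,
trimmed example of the blueprint.json shape it produces (all names and values
are placeholders):

  {
    "configurations": [
      {"cluster-env": {"properties_attributes": {}, "properties": {"p1": "v1"}}}
    ],
    "host_groups": [
      {"name": "host_group_0",
       "components": [{"name": "NAMENODE"}, {"name": "HDFS_CLIENT"}],
       "cardinality": "1"}
    ],
    "Blueprints": {"stack_name": "HDP", "stack_version": "2.6"}
  }

The companion clustertemplate.json pairs each host_group name with concrete
fqdn entries and references the blueprint by name.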

http://git-wip-us.apache.org/repos/asf/ambari/blob/4427a336/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index b8e7728..f34cbdc 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -125,6 +125,7 @@ Em.I18n.translations = {
   'common.more': 'More...',
   'common.print':'Print',
   'common.deploy':'Deploy',
+  'common.generate.blueprint':'Generate Blueprint',
   'common.message':'Message',
   'common.tasks':'Tasks',
   'common.open':'Open',

http://git-wip-us.apache.org/repos/asf/ambari/blob/4427a336/ambari-web/app/templates/wizard/step8.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/wizard/step8.hbs b/ambari-web/app/templates/wizard/step8.hbs
index ac32710..907b3d4 100644
--- a/ambari-web/app/templates/wizard/step8.hbs
+++ b/ambari-web/app/templates/wizard/step8.hbs
@@ -99,5 +99,10 @@
     </button>
     <button type="button" class="btn btn-info pull-right" {{action printReview target="view"}}>{{t common.print}}</button>
     <button type="button" {{bindAttr class=":btn :btn-primary :pull-right :mrm controller.showDownloadCsv::hide"}} {{action downloadCSV target="controller"}}>{{t admin.kerberos.wizard.step5.downloadCSV}}</button>
+    {{#unless App.router.clusterInstallCompleted}}
+      <button type="button" class="btn btn-success pull-right" {{action generateBlueprint target="controller"}}><i class="glyphicon glyphicon-download-alt"></i>&nbsp;
+        {{t common.generate.blueprint}}
+      </button>
+    {{/unless}}
   </div>
 </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/4427a336/ambari-web/test/controllers/wizard/step8_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/wizard/step8_test.js b/ambari-web/test/controllers/wizard/step8_test.js
index 7cdb69a..1a3214e 100644
--- a/ambari-web/test/controllers/wizard/step8_test.js
+++ b/ambari-web/test/controllers/wizard/step8_test.js
@@ -50,7 +50,73 @@ var configs = Em.A([
   Em.Object.create({filename: 'falcon-startup.properties.xml', name: 'p1', value: 'v1'}),
   Em.Object.create({filename: 'falcon-startup.properties.xml', name: 'p2', value: 'v2'}),
   Em.Object.create({filename: 'falcon-runtime.properties.xml', name: 'p1', value: 'v1'}),
-  Em.Object.create({filename: 'falcon-runtime.properties.xml', name: 'p2', value: 'v2'})
+  Em.Object.create({filename: 'falcon-runtime.properties.xml', name: 'p2', value: 'v2'}),
+  Em.Object.create({filename: 'cluster-env.xml', name: 'p1', value: 'v1'}),
+]);
+
+var services = Em.A([
+        Em.Object.create({
+          serviceName: 's1',
+          isSelected: true,
+          isInstalled: false,
+          displayNameOnSelectServicePage: 's01',
+          isClientOnlyService: false,
+          serviceComponents: Em.A([
+            Em.Object.create({
+              isClient: true
+            })
+          ]),
+          configTypes: Em.A([
+            Em.Object.create({
+              type: 'cluster-env'
+            })
+          ]),
+          isHiddenOnSelectServicePage: false
+        }),
+        Em.Object.create({
+          serviceName: 's2',
+          isSelected: true,
+          isInstalled: false,
+          displayNameOnSelectServicePage: 's02',
+          serviceComponents: Em.A([
+            Em.Object.create({
+              isMaster: true
+            })
+          ]),
+          configTypes: Em.A([
+            Em.Object.create({
+              type: 'hdfs-site'
+            })
+          ]),
+          isHiddenOnSelectServicePage: false
+        }),
+        Em.Object.create({
+          serviceName: 's3',
+          isSelected: true,
+          isInstalled: false,
+          displayNameOnSelectServicePage: 's03',
+          serviceComponents: Em.A([
+            Em.Object.create({
+              isHAComponentOnly: true
+            })
+          ]),
+          configTypes: [],
+          isHiddenOnSelectServicePage: false
+        }),
+        Em.Object.create({
+          serviceName: 's4',
+          isSelected: true,
+          isInstalled: false,
+          displayNameOnSelectServicePage: 's03',
+          isClientOnlyService: true,
+          serviceComponents: Em.A([
+            Em.Object.create({
+              isClient: true
+            })
+          ]),
+          configTypes: [],
+          isHiddenOnSelectServicePage: false
+        })
 ]);
 
 function getController() {
@@ -211,54 +277,6 @@ describe('App.WizardStep8Controller', function () {
   describe('#loadServices', function () {
 
     beforeEach(function () {
-      var services = Em.A([
-        Em.Object.create({
-          serviceName: 's1',
-          isSelected: true,
-          displayNameOnSelectServicePage: 's01',
-          isClientOnlyService: false,
-          serviceComponents: Em.A([
-            Em.Object.create({
-              isClient: true
-            })
-          ]),
-          isHiddenOnSelectServicePage: false
-        }),
-        Em.Object.create({
-          serviceName: 's2',
-          isSelected: true,
-          displayNameOnSelectServicePage: 's02',
-          serviceComponents: Em.A([
-            Em.Object.create({
-              isMaster: true
-            })
-          ]),
-          isHiddenOnSelectServicePage: false
-        }),
-        Em.Object.create({
-          serviceName: 's3',
-          isSelected: true,
-          displayNameOnSelectServicePage: 's03',
-          serviceComponents: Em.A([
-            Em.Object.create({
-              isHAComponentOnly: true
-            })
-          ]),
-          isHiddenOnSelectServicePage: false
-        }),
-        Em.Object.create({
-          serviceName: 's4',
-          isSelected: true,
-          displayNameOnSelectServicePage: 's03',
-          isClientOnlyService: true,
-          serviceComponents: Em.A([
-            Em.Object.create({
-              isClient: true
-            })
-          ]),
-          isHiddenOnSelectServicePage: false
-        })
-      ]);
       var selectedServices = services.filterProperty('isSelected');
       var slaveComponentHosts = Em.A([
         Em.Object.create({
@@ -2323,5 +2341,19 @@ describe('App.WizardStep8Controller', function () {
     });
 
   });
-
+  //TODO
+  describe('#generateBlueprint', function () {
+     console.log("testing.......")
+     beforeEach(function () {
+         installerStep8Controller = getController();
+         installerStep8Controller.set('configs', configs);
+         installerStep8Controller.set('content.services', services.filterProperty('isSelected'))
+         installerStep8Controller.set('selectedServices', []);
+         sinon.spy(installerStep8Controller, 'getConfigurationDetailsForConfigType');
+     });
+     it('should call generateBlueprint', function() {
+       installerStep8Controller.generateBlueprint();
+       expect(installerStep8Controller.getConfigurationDetailsForConfigType.calledThrice).to.be.true;
+     });
+ });
 });


[06/25] ambari git commit: AMBARI-21034. Ambari 3.0: Outstanding Cluster admin issues (alexantonenko)

Posted by jl...@apache.org.
AMBARI-21034. Ambari 3.0: Outstanding Cluster admin issues (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7799fb76
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7799fb76
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7799fb76

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 7799fb76c892c793803233a859f2b94b0b46d9e8
Parents: b333912
Author: Alex Antonenko <hi...@gmail.com>
Authored: Tue May 16 18:45:08 2017 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Tue May 16 23:53:37 2017 +0300

----------------------------------------------------------------------
 .../resources/ui/admin-web/app/views/main.html  | 50 +++++++++++++++-----
 ambari-web/app/messages.js                      |  2 +-
 2 files changed, 40 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7799fb76/ambari-admin/src/main/resources/ui/admin-web/app/views/main.html
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/main.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/main.html
index d62ae15..3bdb80e 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/views/main.html
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/main.html
@@ -18,51 +18,79 @@
 <div class="panel panel-default mainpage">
   <div class="panel-body">
     <h1>{{'main.title' | translate}}</h1>
+
     <div ng-if="isLoaded" id="main-operations-boxes" class="row thumbnails">
       <p ng-hide="cluster">{{'main.noClusterDescription' | translate}}</p>
+
       <p ng-show="cluster">{{'main.hasClusterDescription' | translate}}</p>
 
-        <!--Clusters-->
+      <!--Clusters-->
       <div ng-show="cluster" class="col-sm-11 thumbnail">
         <h4 class="title">{{'main.operateCluster.title' | translate}}</h4>
+
         <div class="description">{{'main.operateCluster.description' | translate}}</div>
         <div class="glyphicon glyphicon-cloud"></div>
         <div class="buttons">
         <span ng-class="{active: isActive('clusters.manageAccess')}">
-          <a ng-show="cluster.Clusters.provisioning_state != 'INSTALLED'" href class="btn btn-primary permission-button" ng-disabled="cluster.Clusters.provisioning_state != 'INSTALLED' ">{{'main.operateCluster.manageRoles' | translate}}</a>
-          <a ng-show="cluster.Clusters.provisioning_state == 'INSTALLED'" href="#/clusters/{{cluster.Clusters.cluster_name}}/userAccessList" class="btn btn-primary permission-button" ng-disabled="cluster.Clusters.provisioning_state != 'INSTALLED' ">{{'main.operateCluster.manageRoles' | translate}}</a>
+          <a ng-show="cluster.Clusters.provisioning_state != 'INSTALLED'" href class="btn btn-primary permission-button"
+             ng-disabled="cluster.Clusters.provisioning_state != 'INSTALLED' ">{{'main.operateCluster.manageRoles' |
+            translate}}</a>
+          <a ng-show="cluster.Clusters.provisioning_state == 'INSTALLED'"
+             href="#/clusters/{{cluster.Clusters.cluster_name}}/userAccessList"
+             class="btn btn-primary permission-button"
+             ng-disabled="cluster.Clusters.provisioning_state != 'INSTALLED' ">{{'main.operateCluster.manageRoles' |
+            translate}}</a>
         </span>
         <span>
-          <a ng-show="cluster.Clusters.provisioning_state != 'INSTALLED'" href class="btn btn-primary go-dashboard-button" ng-disabled="cluster.Clusters.provisioning_state != 'INSTALLED' ">{{'common.goToDashboard' | translate}}</a>
-          <a ng-show="cluster.Clusters.provisioning_state == 'INSTALLED'" href="{{fromSiteRoot('/#/main/dashboard/metrics')}}" class="btn btn-primary go-dashboard-button" ng-disabled="cluster.Clusters.provisioning_state != 'INSTALLED' ">{{'common.goToDashboard' | translate}}</a>
+          <a ng-show="cluster.Clusters.provisioning_state != 'INSTALLED'" href
+             class="btn btn-primary go-dashboard-button"
+             ng-disabled="cluster.Clusters.provisioning_state != 'INSTALLED' ">{{'common.goToDashboard' |
+            translate}}</a>
+          <a ng-show="cluster.Clusters.provisioning_state == 'INSTALLED'"
+             href="{{fromSiteRoot('/#/main/dashboard/metrics')}}" class="btn btn-primary go-dashboard-button"
+             ng-disabled="cluster.Clusters.provisioning_state != 'INSTALLED' ">{{'common.goToDashboard' |
+            translate}}</a>
         </span>
         </div>
       </div>
       <div ng-hide="cluster" class="col-sm-11 thumbnail">
         <h4 class="title">{{'main.createCluster.title' | translate}}</h4>
+
         <div class="description">{{'main.createCluster.description' | translate}}</div>
         <div class="glyphicon glyphicon-cloud"></div>
-        <div class="buttons"> <a href="{{fromSiteRoot('/#/installer/step0')}}" class="btn btn-primary create-cluster-button">{{'main.createCluster.launchInstallWizard' | translate}}</a></div>
+        <div class="buttons"><a href="{{fromSiteRoot('/#/installer/step0')}}"
+                                class="btn btn-primary create-cluster-button">{{'main.createCluster.launchInstallWizard'
+          | translate}}</a></div>
       </div>
 
-        <!--Manage Users and groups-->
+      <!--Manage Users and groups-->
       <div class="col-sm-5 thumbnail">
         <h4 class="title">{{'main.manageUsersAndGroups.title' | translate}}</h4>
+
         <div class="description">{{'main.manageUsersAndGroups.description' | translate}}</div>
         <div class="glyphicon glyphicon-user"></div>
         <div class="buttons">
-          <span ng-class="{active: isActive('users.list')}"><link-to route="users.list" class="btn btn-primary userslist-button">{{'common.users' | translate}}</link-to></span>
-          <span ng-class="{active: isActive('groups.list')}"><link-to route="groups.list" class="btn btn-primary groupslist-button">{{'common.groups' | translate}}</link-to></span>
+          <span ng-class="{active: isActive('users.list')}"><link-to route="users.list"
+                                                                     class="btn btn-primary userslist-button">
+            {{'common.users' | translate}}
+          </link-to></span>
+          <span ng-class="{active: isActive('groups.list')}"><link-to route="groups.list"
+                                                                      class="btn btn-primary groupslist-button">
+            {{'common.groups' | translate}}
+          </link-to></span>
         </div>
       </div>
 
-        <!--Deploy Views-->
+      <!--Deploy Views-->
       <div class="col-sm-5 thumbnail">
         <h4 class="title">{{'main.deployViews.title' | translate}}</h4>
+
         <div class="description">{{'main.deployViews.description' | translate}}</div>
         <div class="glyphicon glyphicon-th"></div>
-        <div ng-class="{active: isActive('views.list')}" class="buttons"><link-to route="views.list" class="btn btn-primary viewslist-button">{{'common.views' | translate}}</link-to></div>
+        <div ng-class="{active: isActive('views.list')}" class="buttons">
+          <link-to route="views.list" class="btn btn-primary viewslist-button">{{'common.views' | translate}}</link-to>
         </div>
       </div>
+    </div>
   </div>
 </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7799fb76/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 8f8d981..b8e7728 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -3165,7 +3165,7 @@ Em.I18n.translations = {
   'menu.item.dashboard':'Dashboard',
   'menu.item.services':'Services',
   'menu.item.hosts':'Hosts',
-  'menu.item.admin':'Admin',
+  'menu.item.admin':'Cluster Admin',
   'menu.item.alerts': 'Alerts',
   'menu.item.views':'<i class="glyphicon glyphicon-th"></i>',
   'menu.item.views.noViews':'No Views',


[14/25] ambari git commit: AMBARI-20881 Add Log Level Filter to the Log Search config API (mgergely)

Posted by jl...@apache.org.
AMBARI-20881 Add Log Level Filter to the Log Search config API (mgergely)

Change-Id: I8e3d5a628d02407ad2af4ecb77fff3ada10f7707


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3b94d3cf
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3b94d3cf
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3b94d3cf

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 3b94d3cff380bf3557af57c6ecd9005ab46b8916
Parents: f18a822
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Wed May 17 14:47:06 2017 +0200
Committer: Miklos Gergely <mg...@hortonworks.com>
Committed: Wed May 17 14:47:40 2017 +0200

----------------------------------------------------------------------
 .../ambari-logsearch-config-api/pom.xml         |   2 +-
 .../config/api/InputConfigMonitor.java          |   4 +-
 .../config/api/LogLevelFilterMonitor.java       |  44 ++++
 .../logsearch/config/api/LogSearchConfig.java   |  57 ++++-
 .../model/loglevelfilter/LogLevelFilter.java    |  79 +++++++
 .../model/loglevelfilter/LogLevelFilterMap.java |  33 +++
 .../config/api/LogSearchConfigClass1.java       |  21 +-
 .../config/api/LogSearchConfigClass2.java       |  21 +-
 .../ambari-logsearch-config-zookeeper/pom.xml   |   4 +
 .../config/zookeeper/LogSearchConfigZK.java     | 191 ++++++++++++-----
 .../org/apache/ambari/logfeeder/LogFeeder.java  |   6 +-
 .../logfeeder/input/InputConfigUploader.java    |   2 +-
 .../logfeeder/logconfig/FilterLogData.java      |  87 --------
 .../logfeeder/logconfig/LogConfigFetcher.java   | 168 ---------------
 .../logfeeder/logconfig/LogConfigHandler.java   | 213 -------------------
 .../logfeeder/logconfig/LogFeederFilter.java    |  90 --------
 .../logconfig/LogFeederFilterWrapper.java       |  55 -----
 .../logfeeder/loglevelfilter/FilterLogData.java |  73 +++++++
 .../loglevelfilter/LogLevelFilterHandler.java   | 157 ++++++++++++++
 .../ambari/logfeeder/output/OutputManager.java  |   2 +-
 .../ambari/logfeeder/util/LogFeederUtil.java    |  19 --
 .../logconfig/LogConfigHandlerTest.java         |  90 ++++----
 .../src/test/resources/logfeeder.properties     |   3 +-
 .../configurer/LogfeederFilterConfigurer.java   |  66 ------
 .../ambari/logsearch/dao/UserConfigSolrDao.java |  79 -------
 .../ambari/logsearch/doc/DocConstants.java      |  10 +-
 .../logsearch/manager/ShipperConfigManager.java |  45 +++-
 .../logsearch/manager/UserConfigManager.java    |  24 ---
 .../model/common/LSServerLogLevelFilter.java    | 100 +++++++++
 .../model/common/LSServerLogLevelFilterMap.java |  65 ++++++
 .../model/common/LogFeederDataMap.java          |  50 -----
 .../model/common/LogfeederFilterData.java       |  87 --------
 .../logsearch/rest/ShipperConfigResource.java   |  43 +++-
 .../logsearch/rest/UserConfigResource.java      |  18 --
 .../webapp/templates/common/Header_tmpl.html    |   5 +-
 .../server/upgrade/UpgradeCatalog300.java       |  15 ++
 .../configuration/logfeeder-properties.xml      |  10 +
 .../configuration/logsearch-properties.xml      |  10 -
 .../LOGSEARCH/0.5.0/themes/theme.json           |   4 +-
 .../server/upgrade/UpgradeCatalog300Test.java   |  29 +++
 40 files changed, 982 insertions(+), 1099 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-config-api/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/pom.xml b/ambari-logsearch/ambari-logsearch-config-api/pom.xml
index e9abed0..72fcc80 100644
--- a/ambari-logsearch/ambari-logsearch-config-api/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-config-api/pom.xml
@@ -33,7 +33,7 @@
   <properties>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
   </properties>
-  
+
   <dependencies>
     <dependency>
       <groupId>junit</groupId>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/InputConfigMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/InputConfigMonitor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/InputConfigMonitor.java
index df26920..29a82a6 100644
--- a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/InputConfigMonitor.java
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/InputConfigMonitor.java
@@ -20,7 +20,7 @@
 package org.apache.ambari.logsearch.config.api;
 
 /**
- * Monitors input configuration changes. 
+ * Monitors input configuration changes.
  */
 public interface InputConfigMonitor {
   /**
@@ -31,7 +31,7 @@ public interface InputConfigMonitor {
    * @throws Exception
    */
   void loadInputConfigs(String serviceName, String inputConfig) throws Exception;
-  
+
   /**
    * Notification of the removal of an input configuration.
    * 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogLevelFilterMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogLevelFilterMonitor.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogLevelFilterMonitor.java
new file mode 100644
index 0000000..766f751
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogLevelFilterMonitor.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logsearch.config.api;
+
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
+
+/**
+ * Monitors log level filter changes.
+ */
+public interface LogLevelFilterMonitor {
+
+  /**
+   * Notification of a new or updated log level filter.
+   * 
+   * @param logId The log for which the log level filter was created/updated.
+   * @param logLevelFilter The log level filter to apply from now on to the log.
+   */
+  void setLogLevelFilter(String logId, LogLevelFilter logLevelFilter);
+
+  /**
+   * Notification of the removal of a log level filter.
+   * 
+   * @param logId The log whose log level filter was removed.
+   */
+  void removeLogLevelFilter(String logId);
+
+}
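
A minimal sketch of a consumer of the new interface, keeping the active
filters in an in-memory map; the class name and the storage choice are
illustrative assumptions, not part of this commit:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    import org.apache.ambari.logsearch.config.api.LogLevelFilterMonitor;
    import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;

    public class InMemoryLogLevelFilterMonitor implements LogLevelFilterMonitor {
      // Latest filter per log id, replaced wholesale on each notification.
      private final Map<String, LogLevelFilter> filters = new ConcurrentHashMap<>();

      @Override
      public void setLogLevelFilter(String logId, LogLevelFilter logLevelFilter) {
        filters.put(logId, logLevelFilter);
      }

      @Override
      public void removeLogLevelFilter(String logId) {
        filters.remove(logId);
      }

      // Convenience accessor for callers that need the current filter of a log.
      public LogLevelFilter getLogLevelFilter(String logId) {
        return filters.get(logId);
      }
    }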

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfig.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfig.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfig.java
index 0bb0b78..07921d0 100644
--- a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfig.java
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/LogSearchConfig.java
@@ -23,6 +23,9 @@ import java.io.Closeable;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilterMap;
+
 /**
  * Log Search Configuration, which uploads and retrieves configurations, and monitors their changes.
  */
@@ -33,7 +36,7 @@ public interface LogSearchConfig extends Closeable {
   public enum Component {
     SERVER, LOGFEEDER;
   }
-  
+
   /**
    * Initialization of the configuration.
    * 
@@ -42,7 +45,7 @@ public interface LogSearchConfig extends Closeable {
    * @throws Exception
    */
   void init(Component component, Map<String, String> properties) throws Exception;
-  
+
   /**
    * Returns all the service names with input configurations of a cluster. Will be used only in SERVER mode.
    * 
@@ -50,7 +53,7 @@ public interface LogSearchConfig extends Closeable {
    * @return List of the service names.
    */
   List<String> getServices(String clusterName);
-  
+
   /**
    * Checks if input configuration exists.
    * 
@@ -60,7 +63,7 @@ public interface LogSearchConfig extends Closeable {
    * @throws Exception
    */
   boolean inputConfigExists(String clusterName, String serviceName) throws Exception;
-  
+
   /**
    * Returns the input configuration of a service in a cluster. Will be used only in SERVER mode.
    * 
@@ -69,7 +72,7 @@ public interface LogSearchConfig extends Closeable {
    * @return The input configuration for the service if it exists, null otherwise.
    */
   String getInputConfig(String clusterName, String serviceName);
-  
+
   /**
    * Uploads the input configuration for a service in a cluster.
    * 
@@ -78,13 +81,51 @@ public interface LogSearchConfig extends Closeable {
    * @param inputConfig The input configuration of the service.
    * @throws Exception
    */
+  void createInputConfig(String clusterName, String serviceName, String inputConfig) throws Exception;
+
+  /**
+   * Modifies the input configuration for a service in a cluster.
+   * 
+   * @param clusterName The name of the cluster where the service is.
+   * @param serviceName The name of the service whose input configuration is modified.
+   * @param inputConfig The input configuration of the service.
+   * @throws Exception
+   */
   void setInputConfig(String clusterName, String serviceName, String inputConfig) throws Exception;
-  
+
+  /**
+   * Uploads the log level filter of a log.
+   * 
+   * @param clusterName The name of the cluster where the log is.
+   * @param logId The id of the log.
+   * @param filter The log level filter for the log.
+   * @throws Exception 
+   */
+  void createLogLevelFilter(String clusterName, String logId, LogLevelFilter filter) throws Exception;
+
+  /**
+   * Modifies the log level filters for all the logs.
+   * 
+   * @param clusterName The name of the cluster where the logs are.
+   * @param filters The log level filters to set.
+   * @throws Exception
+   */
+  void setLogLevelFilters(String clusterName, LogLevelFilterMap filters) throws Exception;
+
+  /**
+   * Returns the Log Level Filters of a cluster.
+   * 
+   * @param clusterName The name of the cluster whose log level filters are required.
+   * @return All the log level filters of the cluster.
+   */
+  LogLevelFilterMap getLogLevelFilters(String clusterName);
+
   /**
    * Starts the monitoring of the input configurations, asynchronously. Will be used only in LOGFEEDER mode.
    * 
-   * @param configMonitor The input config monitor to call in case of a config change.
+   * @param inputConfigMonitor The input config monitor to call in case of an input config change.
+   * @param logLevelFilterMonitor The log level filter monitor to call in case of a log level filter change.
    * @throws Exception
    */
-  void monitorInputConfigChanges(InputConfigMonitor configMonitor) throws Exception;
+  void monitorInputConfigChanges(InputConfigMonitor inputConfigMonitor, LogLevelFilterMonitor logLevelFilterMonitor) throws Exception;
 }
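
A usage sketch for the extended interface, assuming an already initialized
LogSearchConfig instance and a cluster named "cl1" (both hypothetical):

    import java.util.Arrays;

    import org.apache.ambari.logsearch.config.api.LogSearchConfig;
    import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
    import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilterMap;

    public class LogLevelFilterUsageSketch {
      static void example(LogSearchConfig config) throws Exception {
        // Upload the initial filter for a log id.
        LogLevelFilter filter = new LogLevelFilter();
        filter.setLabel("ambari_server");
        filter.setDefaultLevels(Arrays.asList("FATAL", "ERROR", "WARN", "INFO"));
        config.createLogLevelFilter("cl1", "ambari_server", filter);

        // Read back all filters of the cluster, adjust one, write them back.
        LogLevelFilterMap all = config.getLogLevelFilters("cl1");
        all.getFilter().get("ambari_server").setOverrideLevels(Arrays.asList("DEBUG"));
        config.setLogLevelFilters("cl1", all);
      }
    }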

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/loglevelfilter/LogLevelFilter.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/loglevelfilter/LogLevelFilter.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/loglevelfilter/LogLevelFilter.java
new file mode 100644
index 0000000..06cf589
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/loglevelfilter/LogLevelFilter.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.config.api.model.loglevelfilter;
+
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+
+public class LogLevelFilter {
+
+  private String label;
+  private List<String> hosts;
+  private List<String> defaultLevels;
+  private List<String> overrideLevels;
+  private Date expiryTime;
+
+  public LogLevelFilter() {
+    hosts = new ArrayList<String>();
+    defaultLevels = new ArrayList<String>();
+    overrideLevels = new ArrayList<String>();
+  }
+
+  public String getLabel() {
+    return label;
+  }
+
+  public void setLabel(String label) {
+    this.label = label;
+  }
+
+  public List<String> getHosts() {
+    return hosts;
+  }
+
+  public void setHosts(List<String> hosts) {
+    this.hosts = hosts;
+  }
+
+  public List<String> getDefaultLevels() {
+    return defaultLevels;
+  }
+
+  public void setDefaultLevels(List<String> defaultLevels) {
+    this.defaultLevels = defaultLevels;
+  }
+
+  public List<String> getOverrideLevels() {
+    return overrideLevels;
+  }
+
+  public void setOverrideLevels(List<String> overrideLevels) {
+    this.overrideLevels = overrideLevels;
+  }
+
+  public Date getExpiryTime() {
+    return expiryTime;
+  }
+
+  public void setExpiryTime(Date expiryTime) {
+    this.expiryTime = expiryTime;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/loglevelfilter/LogLevelFilterMap.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/loglevelfilter/LogLevelFilterMap.java b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/loglevelfilter/LogLevelFilterMap.java
new file mode 100644
index 0000000..37fdb9f
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/main/java/org/apache/ambari/logsearch/config/api/model/loglevelfilter/LogLevelFilterMap.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ambari.logsearch.config.api.model.loglevelfilter;
+
+import java.util.TreeMap;
+
+public class LogLevelFilterMap {
+  private TreeMap<String, LogLevelFilter> filter;
+
+  public TreeMap<String, LogLevelFilter> getFilter() {
+    return filter;
+  }
+
+  public void setFilter(TreeMap<String, LogLevelFilter> filter) {
+    this.filter = filter;
+  }
+}
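
The ZooKeeper implementation below serializes these POJOs with Gson using
the date format yyyy-MM-dd'T'HH:mm:ss.SSS; a sketch of the resulting JSON
(all field values here are made up):

    import java.util.Arrays;
    import java.util.Date;

    import com.google.gson.Gson;
    import com.google.gson.GsonBuilder;

    import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;

    public class LogLevelFilterJsonSketch {
      public static void main(String[] args) {
        // Same date format as the DATE_FORMAT constant in LogSearchConfigZK.
        Gson gson = new GsonBuilder().setDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS").create();

        LogLevelFilter filter = new LogLevelFilter();
        filter.setLabel("hdfs_namenode");
        filter.setDefaultLevels(Arrays.asList("FATAL", "ERROR", "WARN"));
        filter.setOverrideLevels(Arrays.asList("DEBUG"));
        filter.setExpiryTime(new Date());

        // Prints something like:
        // {"label":"hdfs_namenode","hosts":[],"defaultLevels":["FATAL","ERROR","WARN"],
        //  "overrideLevels":["DEBUG"],"expiryTime":"2017-05-17T14:47:40.000"}
        System.out.println(gson.toJson(filter));
      }
    }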

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass1.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass1.java b/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass1.java
index 969eb30..fc3fe5b 100644
--- a/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass1.java
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass1.java
@@ -24,6 +24,8 @@ import java.util.Map;
 
 import org.apache.ambari.logsearch.config.api.InputConfigMonitor;
 import org.apache.ambari.logsearch.config.api.LogSearchConfig;
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilterMap;
 
 public class LogSearchConfigClass1 implements LogSearchConfig {
   @Override
@@ -35,10 +37,14 @@ public class LogSearchConfigClass1 implements LogSearchConfig {
   }
 
   @Override
+  public void createInputConfig(String clusterName, String serviceName, String inputConfig) throws Exception {}
+
+  @Override
   public void setInputConfig(String clusterName, String serviceName, String inputConfig) throws Exception {}
 
   @Override
-  public void monitorInputConfigChanges(InputConfigMonitor configMonitor) throws Exception {}
+  public void monitorInputConfigChanges(InputConfigMonitor inputConfigMonitor, LogLevelFilterMonitor logLevelFilterMonitor)
+      throws Exception {}
 
   @Override
   public List<String> getServices(String clusterName) {
@@ -49,7 +55,18 @@ public class LogSearchConfigClass1 implements LogSearchConfig {
   public String getInputConfig(String clusterName, String serviceName) {
     return null;
   }
-  
+
+  @Override
+  public void createLogLevelFilter(String clusterName, String logId, LogLevelFilter filter) {}
+
+  @Override
+  public void setLogLevelFilters(String clusterName, LogLevelFilterMap filters) throws Exception {}
+
+  @Override
+  public LogLevelFilterMap getLogLevelFilters(String clusterName) {
+    return null;
+  }
+
   @Override
   public void close() {}
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass2.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass2.java b/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass2.java
index 664ecc9..346edb3 100644
--- a/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass2.java
+++ b/ambari-logsearch/ambari-logsearch-config-api/src/test/java/org/apache/ambari/logsearch/config/api/LogSearchConfigClass2.java
@@ -24,6 +24,8 @@ import java.util.Map;
 
 import org.apache.ambari.logsearch.config.api.InputConfigMonitor;
 import org.apache.ambari.logsearch.config.api.LogSearchConfig;
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilterMap;
 
 public class LogSearchConfigClass2 implements LogSearchConfig {
   @Override
@@ -35,10 +37,14 @@ public class LogSearchConfigClass2 implements LogSearchConfig {
   }
 
   @Override
+  public void createInputConfig(String clusterName, String serviceName, String inputConfig) throws Exception {}
+
+  @Override
   public void setInputConfig(String clusterName, String serviceName, String inputConfig) throws Exception {}
 
   @Override
-  public void monitorInputConfigChanges(InputConfigMonitor configMonitor) throws Exception {}
+  public void monitorInputConfigChanges(InputConfigMonitor inputConfigMonitor, LogLevelFilterMonitor logLevelFilterMonitor)
+      throws Exception {}
 
   @Override
   public List<String> getServices(String clusterName) {
@@ -49,7 +55,18 @@ public class LogSearchConfigClass2 implements LogSearchConfig {
   public String getInputConfig(String clusterName, String serviceName) {
     return null;
   }
-  
+
+  @Override
+  public void createLogLevelFilter(String clusterName, String logId, LogLevelFilter filter) {}
+
+  @Override
+  public void setLogLevelFilters(String clusterName, LogLevelFilterMap filters) throws Exception {}
+
+  @Override
+  public LogLevelFilterMap getLogLevelFilters(String clusterName) {
+    return null;
+  }
+
   @Override
   public void close() {}
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-config-zookeeper/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/pom.xml b/ambari-logsearch/ambari-logsearch-config-zookeeper/pom.xml
index 4ed8eba..2c59a4a 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/pom.xml
@@ -70,5 +70,9 @@
       <artifactId>curator-recipes</artifactId>
       <version>2.12.0</version>
     </dependency>
+    <dependency>
+      <groupId>com.google.code.gson</groupId>
+      <artifactId>gson</artifactId>
+    </dependency>
   </dependencies>
 </project>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java
index 738fde2..5e22374 100644
--- a/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java
+++ b/ambari-logsearch/ambari-logsearch-config-zookeeper/src/main/java/org/apache/ambari/logsearch/config/zookeeper/LogSearchConfigZK.java
@@ -22,9 +22,13 @@ package org.apache.ambari.logsearch.config.zookeeper;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.TreeMap;
 
 import org.apache.ambari.logsearch.config.api.LogSearchConfig;
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilterMap;
 import org.apache.ambari.logsearch.config.api.InputConfigMonitor;
+import org.apache.ambari.logsearch.config.api.LogLevelFilterMonitor;
 import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.curator.framework.CuratorFramework;
@@ -32,15 +36,19 @@ import org.apache.curator.framework.CuratorFrameworkFactory;
 import org.apache.curator.framework.recipes.cache.ChildData;
 import org.apache.curator.framework.recipes.cache.TreeCache;
 import org.apache.curator.framework.recipes.cache.TreeCacheEvent;
+import org.apache.curator.framework.recipes.cache.TreeCacheEvent.Type;
 import org.apache.curator.framework.recipes.cache.TreeCacheListener;
 import org.apache.curator.retry.ExponentialBackoffRetry;
 import org.apache.curator.utils.ZKPaths;
 import org.apache.log4j.Logger;
+import org.apache.zookeeper.KeeperException.NodeExistsException;
 import org.apache.zookeeper.ZooDefs;
 import org.apache.zookeeper.data.ACL;
 import org.apache.zookeeper.data.Id;
 
 import com.google.common.base.Splitter;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
 
 public class LogSearchConfigZK implements LogSearchConfig {
   private static final Logger LOG = Logger.getLogger(LogSearchConfigZK.class);
@@ -49,6 +57,7 @@ public class LogSearchConfigZK implements LogSearchConfig {
   private static final int CONNECTION_TIMEOUT = 30000;
   private static final String DEFAULT_ZK_ROOT = "/logsearch";
   private static final long WAIT_FOR_ROOT_SLEEP_SECONDS = 10;
+  private static final String DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS";
 
   private static final String CLUSTER_NAME_PROPERTY = "cluster.name";
   private static final String ZK_CONNECT_STRING_PROPERTY = "logsearch.config.zk_connect_string";
@@ -59,6 +68,7 @@ public class LogSearchConfigZK implements LogSearchConfig {
   private String root;
   private CuratorFramework client;
   private TreeCache cache;
+  private Gson gson;
 
   @Override
   public void init(Component component, Map<String, String> properties) throws Exception {
@@ -89,6 +99,8 @@ public class LogSearchConfigZK implements LogSearchConfig {
 
       cache = new TreeCache(client, String.format("%s/%s", root, properties.get(CLUSTER_NAME_PROPERTY)));
     }
+    
+    gson = new GsonBuilder().setDateFormat(DATE_FORMAT).create();
   }
 
   @Override
@@ -98,66 +110,43 @@ public class LogSearchConfigZK implements LogSearchConfig {
   }
 
   @Override
-  public void setInputConfig(String clusterName, String serviceName, String inputConfig) throws Exception {
+  public void createInputConfig(String clusterName, String serviceName, String inputConfig) throws Exception {
     String nodePath = String.format("%s/%s/input/%s", root, clusterName, serviceName);
-    client.create().creatingParentContainersIfNeeded().withACL(getAcls()).forPath(nodePath, inputConfig.getBytes());
-    LOG.info("Set input config for the service " + serviceName + " for cluster " + clusterName);
-  }
-
-  private List<ACL> getAcls() {
-    String aclStr = properties.get(ZK_ACLS_PROPERTY);
-    if (StringUtils.isBlank(aclStr)) {
-      return ZooDefs.Ids.OPEN_ACL_UNSAFE;
+    try {
+      client.create().creatingParentContainersIfNeeded().withACL(getAcls()).forPath(nodePath, inputConfig.getBytes());
+      LOG.info("Uploaded input config for the service " + serviceName + " for cluster " + clusterName);
+    } catch (NodeExistsException e) {
+      LOG.debug("Did not upload input config for service " + serviceName + " as it was already uploaded by another Log Feeder");
     }
-
-    List<ACL> acls = new ArrayList<>();
-    List<String> aclStrList = Splitter.on(",").omitEmptyStrings().trimResults().splitToList(aclStr);
-    for (String unparcedAcl : aclStrList) {
-      String[] parts = unparcedAcl.split(":");
-      if (parts.length == 3) {
-        acls.add(new ACL(parsePermission(parts[2]), new Id(parts[0], parts[1])));
-      }
-    }
-    return acls;
   }
 
-  private Integer parsePermission(String permission) {
-    int permissionCode = 0;
-    for (char each : permission.toLowerCase().toCharArray()) {
-      switch (each) {
-        case 'r':
-          permissionCode |= ZooDefs.Perms.READ;
-          break;
-        case 'w':
-          permissionCode |= ZooDefs.Perms.WRITE;
-          break;
-        case 'c':
-          permissionCode |= ZooDefs.Perms.CREATE;
-          break;
-        case 'd':
-          permissionCode |= ZooDefs.Perms.DELETE;
-          break;
-        case 'a':
-          permissionCode |= ZooDefs.Perms.ADMIN;
-          break;
-        default:
-          throw new IllegalArgumentException("Unsupported permission: " + permission);
-      }
-    }
-    return permissionCode;
+  @Override
+  public void setInputConfig(String clusterName, String serviceName, String inputConfig) throws Exception {
+    String nodePath = String.format("%s/%s/input/%s", root, clusterName, serviceName);
+    client.setData().forPath(nodePath, inputConfig.getBytes());
+    LOG.info("Set input config for the service " + serviceName + " for cluster " + clusterName);
   }
 
   @Override
-  public void monitorInputConfigChanges(final InputConfigMonitor configMonitor) throws Exception {
+  public void monitorInputConfigChanges(final InputConfigMonitor inputConfigMonitor,
+      final LogLevelFilterMonitor logLevelFilterMonitor) throws Exception {
     TreeCacheListener listener = new TreeCacheListener() {
       public void childEvent(CuratorFramework client, TreeCacheEvent event) throws Exception {
-        if (!event.getData().getPath().startsWith(String.format("%s/%s/input/", root, properties.get(CLUSTER_NAME_PROPERTY)))) {
-          return;
-        }
-        
         String nodeName = ZKPaths.getNodeFromPath(event.getData().getPath());
         String nodeData = new String(event.getData().getData());
-        switch (event.getType()) {
+        Type eventType = event.getType();
+        
+        String configPathStub = String.format("%s/%s/", root, properties.get(CLUSTER_NAME_PROPERTY));
+
+        if (event.getData().getPath().startsWith(configPathStub + "input/")) {
+          handleInputConfigChange(eventType, nodeName, nodeData);
+        } else if (event.getData().getPath().startsWith(configPathStub + "loglevelfilter/")) {
+          handleLogLevelFilterChange(eventType, nodeName, nodeData);
+        }
+      }
+
+      private void handleInputConfigChange(Type eventType, String nodeName, String nodeData) {
+        switch (eventType) {
           case NODE_ADDED:
             LOG.info("Node added under input ZK node: " + nodeName);
             addInputs(nodeName, nodeData);
@@ -177,16 +166,33 @@ public class LogSearchConfigZK implements LogSearchConfig {
       }
 
       private void removeInputs(String serviceName) {
-        configMonitor.removeInputs(serviceName);
+        inputConfigMonitor.removeInputs(serviceName);
       }
 
       private void addInputs(String serviceName, String inputConfig) {
         try {
-          configMonitor.loadInputConfigs(serviceName, inputConfig);
+          inputConfigMonitor.loadInputConfigs(serviceName, inputConfig);
         } catch (Exception e) {
           LOG.error("Could not load input configuration for service " + serviceName + ":\n" + inputConfig, e);
         }
       }
+
+      private void handleLogLevelFilterChange(Type eventType, String nodeName, String nodeData) {
+        switch (eventType) {
+          case NODE_ADDED:
+          case NODE_UPDATED:
+            LOG.info("Node added/updated under loglevelfilter ZK node: " + nodeName);
+            LogLevelFilter logLevelFilter = gson.fromJson(nodeData, LogLevelFilter.class);
+            logLevelFilterMonitor.setLogLevelFilter(nodeName, logLevelFilter);
+            break;
+          case NODE_REMOVED:
+            LOG.info("Node removed loglevelfilter input ZK node: " + nodeName);
+            logLevelFilterMonitor.removeLogLevelFilter(nodeName);
+            break;
+          default:
+            break;
+        }
+      }
     };
     cache.getListenable().addListener(listener);
     cache.start();
@@ -206,6 +212,89 @@ public class LogSearchConfigZK implements LogSearchConfig {
   }
 
   @Override
+  public void createLogLevelFilter(String clusterName, String logId, LogLevelFilter filter) throws Exception {
+    String nodePath = String.format("%s/%s/loglevelfilter/%s", root, clusterName, logId);
+    String logLevelFilterJson = gson.toJson(filter);
+    try {
+      client.create().creatingParentContainersIfNeeded().withACL(getAcls()).forPath(nodePath, logLevelFilterJson.getBytes());
+      LOG.info("Uploaded log level filter for the log " + logId + " for cluster " + clusterName);
+    } catch (NodeExistsException e) {
+      LOG.debug("Did not upload log level filters for log " + logId + " as it was already uploaded by another Log Feeder");
+    }
+  }
+
+  @Override
+  public void setLogLevelFilters(String clusterName, LogLevelFilterMap filters) throws Exception {
+    for (Map.Entry<String, LogLevelFilter> e : filters.getFilter().entrySet()) {
+      String nodePath = String.format("%s/%s/loglevelfilter/%s", root, clusterName, e.getKey());
+      String logLevelFilterJson = gson.toJson(e.getValue());
+      String currentLogLevelFilterJson = new String(cache.getCurrentData(nodePath).getData());
+      if (!logLevelFilterJson.equals(currentLogLevelFilterJson)) {
+        client.setData().forPath(nodePath, logLevelFilterJson.getBytes());
+        LOG.info("Set log level filter for the log " + e.getKey() + " for cluster " + clusterName);
+      }
+    }
+  }
+
+  @Override
+  public LogLevelFilterMap getLogLevelFilters(String clusterName) {
+    String parentPath = String.format("%s/%s/loglevelfilter", root, clusterName);
+    Map<String, ChildData> logLevelFilterNodes = cache.getCurrentChildren(parentPath);
+    TreeMap<String, LogLevelFilter> filters = new TreeMap<>();
+    for (Map.Entry<String, ChildData> e : logLevelFilterNodes.entrySet()) {
+      LogLevelFilter logLevelFilter = gson.fromJson(new String(e.getValue().getData()), LogLevelFilter.class);
+      filters.put(e.getKey(), logLevelFilter);
+    }
+    
+    LogLevelFilterMap logLevelFilters = new LogLevelFilterMap();
+    logLevelFilters.setFilter(filters);
+    return logLevelFilters;
+  }
+
+  private List<ACL> getAcls() {
+    String aclStr = properties.get(ZK_ACLS_PROPERTY);
+    if (StringUtils.isBlank(aclStr)) {
+      return ZooDefs.Ids.OPEN_ACL_UNSAFE;
+    }
+
+    List<ACL> acls = new ArrayList<>();
+    List<String> aclStrList = Splitter.on(",").omitEmptyStrings().trimResults().splitToList(aclStr);
+    for (String unparsedAcl : aclStrList) {
+      String[] parts = unparsedAcl.split(":");
+      if (parts.length == 3) {
+        acls.add(new ACL(parsePermission(parts[2]), new Id(parts[0], parts[1])));
+      }
+    }
+    return acls;
+  }
+
+  private Integer parsePermission(String permission) {
+    int permissionCode = 0;
+    for (char each : permission.toLowerCase().toCharArray()) {
+      switch (each) {
+        case 'r':
+          permissionCode |= ZooDefs.Perms.READ;
+          break;
+        case 'w':
+          permissionCode |= ZooDefs.Perms.WRITE;
+          break;
+        case 'c':
+          permissionCode |= ZooDefs.Perms.CREATE;
+          break;
+        case 'd':
+          permissionCode |= ZooDefs.Perms.DELETE;
+          break;
+        case 'a':
+          permissionCode |= ZooDefs.Perms.ADMIN;
+          break;
+        default:
+          throw new IllegalArgumentException("Unsupported permission: " + permission);
+      }
+    }
+    return permissionCode;
+  }
+
+  @Override
   public void close() {
     LOG.info("Closing ZooKeeper Connection");
     client.close();
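
For reference, getAcls() above expects a comma-separated list of
scheme:id:permissions entries; the standalone sketch below mirrors that
parsing logic (the sample ACL string is an assumption for illustration):

    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.data.ACL;
    import org.apache.zookeeper.data.Id;

    public class ZkAclSketch {
      public static void main(String[] args) {
        // Read access for everyone, full control for a SASL-authenticated user.
        String aclStr = "world:anyone:r,sasl:logsearch:rwcda";
        for (String entry : aclStr.split(",")) {
          String[] parts = entry.trim().split(":");
          int perms = 0;
          for (char c : parts[2].toLowerCase().toCharArray()) {
            switch (c) {
              case 'r': perms |= ZooDefs.Perms.READ; break;
              case 'w': perms |= ZooDefs.Perms.WRITE; break;
              case 'c': perms |= ZooDefs.Perms.CREATE; break;
              case 'd': perms |= ZooDefs.Perms.DELETE; break;
              case 'a': perms |= ZooDefs.Perms.ADMIN; break;
            }
          }
          System.out.println(new ACL(perms, new Id(parts[0], parts[1])));
        }
      }
    }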

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/LogFeeder.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/LogFeeder.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/LogFeeder.java
index 074fedb..c853f42 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/LogFeeder.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/LogFeeder.java
@@ -29,7 +29,7 @@ import org.apache.ambari.logsearch.config.api.LogSearchConfig.Component;
 import org.apache.ambari.logsearch.config.zookeeper.LogSearchConfigZK;
 import org.apache.ambari.logfeeder.input.InputConfigUploader;
 import org.apache.ambari.logfeeder.input.InputManager;
-import org.apache.ambari.logfeeder.logconfig.LogConfigHandler;
+import org.apache.ambari.logfeeder.loglevelfilter.LogLevelFilterHandler;
 import org.apache.ambari.logfeeder.metrics.MetricData;
 import org.apache.ambari.logfeeder.metrics.MetricsManager;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
@@ -71,13 +71,13 @@ public class LogFeeder {
     long startTime = System.currentTimeMillis();
 
     configHandler.init();
-    LogConfigHandler.handleConfig();
     SSLUtil.ensureStorePasswords();
     
     config = LogSearchConfigFactory.createLogSearchConfig(Component.LOGFEEDER,
         Maps.fromProperties(LogFeederUtil.getProperties()), LogSearchConfigZK.class);
+    LogLevelFilterHandler.init(config);
     InputConfigUploader.load(config);
-    config.monitorInputConfigChanges(configHandler);
+    config.monitorInputConfigChanges(configHandler, new LogLevelFilterHandler());
     
     metricsManager.init();
     

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputConfigUploader.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputConfigUploader.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputConfigUploader.java
index b70fbd1..8aec690 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputConfigUploader.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/input/InputConfigUploader.java
@@ -75,7 +75,7 @@ public class InputConfigUploader extends Thread {
             String inputConfig = Files.toString(inputConfigFile, Charset.defaultCharset());
             
             if (!config.inputConfigExists(clusterName, serviceName)) {
-              config.setInputConfig(clusterName, serviceName, inputConfig);
+              config.createInputConfig(clusterName, serviceName, inputConfig);
             }
             filesHandled.add(inputConfigFile.getAbsolutePath());
           } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/FilterLogData.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/FilterLogData.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/FilterLogData.java
deleted file mode 100644
index a05a916..0000000
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/FilterLogData.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.ambari.logfeeder.logconfig;
-
-import java.util.List;
-import java.util.Map;
-
-import org.apache.ambari.logfeeder.common.LogFeederConstants;
-import org.apache.ambari.logfeeder.input.InputMarker;
-import org.apache.ambari.logfeeder.util.LogFeederUtil;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.collections.MapUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.log4j.Logger;
-
-/**
- * Read configuration from solr and filter the log
- */
-public enum FilterLogData {
-  INSTANCE;
-  
-  private static final Logger LOG = Logger.getLogger(FilterLogData.class);
-  
-  private static final boolean DEFAULT_VALUE = true;
-
-  public boolean isAllowed(String jsonBlock, InputMarker inputMarker) {
-    if (StringUtils.isEmpty(jsonBlock)) {
-      return DEFAULT_VALUE;
-    }
-    Map<String, Object> jsonObj = LogFeederUtil.toJSONObject(jsonBlock);
-    return isAllowed(jsonObj, inputMarker);
-  }
-
-  public boolean isAllowed(Map<String, Object> jsonObj, InputMarker inputMarker) {
-    if ("audit".equals(inputMarker.input.getConfigs().get(LogFeederConstants.ROW_TYPE)))
-      return true;
-    
-    boolean isAllowed = applyFilter(jsonObj);
-    if (!isAllowed) {
-      LOG.trace("Filter block the content :" + LogFeederUtil.getGson().toJson(jsonObj));
-    }
-    return isAllowed;
-  }
-  
-
-  private boolean applyFilter(Map<String, Object> jsonObj) {
-    if (MapUtils.isEmpty(jsonObj)) {
-      LOG.warn("Output jsonobj is empty");
-      return DEFAULT_VALUE;
-    }
-    
-    String hostName = (String) jsonObj.get(LogFeederConstants.SOLR_HOST);
-    String componentName = (String) jsonObj.get(LogFeederConstants.SOLR_COMPONENT);
-    String level = (String) jsonObj.get(LogFeederConstants.SOLR_LEVEL);
-    if (StringUtils.isNotBlank(hostName) && StringUtils.isNotBlank(componentName) && StringUtils.isNotBlank(level)) {
-      LogFeederFilter componentFilter = LogConfigHandler.findComponentFilter(componentName);
-      if (componentFilter == null) {
-        return DEFAULT_VALUE;
-      }
-      List<String> allowedLevels = LogConfigHandler.getAllowedLevels(hostName, componentFilter);
-      if (CollectionUtils.isEmpty(allowedLevels)) {
-        allowedLevels.add(LogFeederConstants.ALL);
-      }
-      return LogFeederUtil.isListContains(allowedLevels, level, false);
-    }
-    else {
-      return DEFAULT_VALUE;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogConfigFetcher.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogConfigFetcher.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogConfigFetcher.java
deleted file mode 100644
index 12c744c..0000000
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogConfigFetcher.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.ambari.logfeeder.logconfig;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.ambari.logfeeder.common.LogFeederConstants;
-import org.apache.ambari.logfeeder.util.LogFeederUtil;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.SolrRequest.METHOD;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.SolrDocumentList;
-import org.apache.solr.common.SolrException;
-
-public class LogConfigFetcher {
-  private static final Logger LOG = Logger.getLogger(LogConfigFetcher.class);
-  
-  private static LogConfigFetcher instance;
-  public synchronized static LogConfigFetcher getInstance() {
-    if (instance == null) {
-      try {
-        instance = new LogConfigFetcher();
-      } catch (Exception e) {
-        String logMessageKey = LogConfigFetcher.class.getSimpleName() + "_SOLR_UTIL";
-              LogFeederUtil.logErrorMessageByInterval(logMessageKey, "Error constructing solrUtil", e, LOG, Level.WARN);
-      }
-    }
-    return instance;
-  }
-
-  private SolrClient solrClient;
-
-  private String solrDetail = "";
-
-  public LogConfigFetcher() throws Exception {
-    String url = LogFeederUtil.getStringProperty("logfeeder.solr.url");
-    String zkConnectString = LogFeederUtil.getStringProperty("logfeeder.solr.zk_connect_string");
-    String collection = LogFeederUtil.getStringProperty("logfeeder.solr.core.config.name", "history");
-    connectToSolr(url, zkConnectString, collection);
-  }
-
-  private SolrClient connectToSolr(String url, String zkConnectString, String collection) throws Exception {
-    solrDetail = "zkConnectString=" + zkConnectString + ", collection=" + collection + ", url=" + url;
-
-    LOG.info("connectToSolr() " + solrDetail);
-    if (StringUtils.isEmpty(collection)) {
-      throw new Exception("For solr, collection name is mandatory. " + solrDetail);
-    }
-    
-    if (StringUtils.isEmpty(zkConnectString) && StringUtils.isBlank(url))
-      throw new Exception("Both zkConnectString and URL are empty. zkConnectString=" + zkConnectString + ", collection=" +
-          collection + ", url=" + url);
-    
-    if (StringUtils.isNotEmpty(zkConnectString)) {
-      solrDetail = "zkConnectString=" + zkConnectString + ", collection=" + collection;
-      LOG.info("Using zookeepr. " + solrDetail);
-      CloudSolrClient solrClouldClient = new CloudSolrClient(zkConnectString);
-      solrClouldClient.setDefaultCollection(collection);
-      solrClient = solrClouldClient;
-      checkSolrStatus(3 * 60 * 1000);
-    } else {
-      solrDetail = "collection=" + collection + ", url=" + url;
-      String collectionURL = url + "/" + collection;
-      LOG.info("Connecting to  solr : " + collectionURL);
-      solrClient = new HttpSolrClient(collectionURL);
-    }
-    return solrClient;
-  }
-
-  private boolean checkSolrStatus(int waitDurationMS) {
-    boolean status = false;
-    try {
-      long beginTimeMS = System.currentTimeMillis();
-      long waitIntervalMS = 2000;
-      int pingCount = 0;
-      while (true) {
-        pingCount++;
-        CollectionAdminResponse response = null;
-        try {
-          CollectionAdminRequest.List colListReq = new CollectionAdminRequest.List();
-          response = colListReq.process(solrClient);
-        } catch (Exception ex) {
-          LOG.error("Con't connect to Solr. solrDetail=" + solrDetail, ex);
-        }
-        if (response != null && response.getStatus() == 0) {
-          LOG.info("Solr getCollections() is success. solr=" + solrDetail);
-          status = true;
-          break;
-        }
-        if (System.currentTimeMillis() - beginTimeMS > waitDurationMS) {
-          LOG.error("Solr is not reachable even after " + (System.currentTimeMillis() - beginTimeMS)
-            + " ms. If you are using alias, then you might have to restart LogSearch after Solr is up and running. solr="
-            + solrDetail + ", response=" + response);
-          break;
-        } else {
-          LOG.warn("Solr is not reachable yet. getCollections() attempt count=" + pingCount + ". Will sleep for " +
-              waitIntervalMS + " ms and try again." + " solr=" + solrDetail + ", response=" + response);
-        }
-        Thread.sleep(waitIntervalMS);
-      }
-    } catch (Throwable t) {
-      LOG.error("Seems Solr is not up. solrDetail=" + solrDetail, t);
-    }
-    return status;
-  }
-
-  public Map<String, Object> getConfigDoc() {
-    HashMap<String, Object> configMap = new HashMap<String, Object>();
-    SolrQuery solrQuery = new SolrQuery();
-    solrQuery.setQuery("*:*");
-    String fq = LogFeederConstants.ROW_TYPE + ":" + LogFeederConstants.LOGFEEDER_FILTER_NAME;
-    solrQuery.setFilterQueries(fq);
-    try {
-      QueryResponse response = process(solrQuery);
-      if (response != null) {
-        SolrDocumentList documentList = response.getResults();
-        if (CollectionUtils.isNotEmpty(documentList)) {
-          SolrDocument configDoc = documentList.get(0);
-          String configJson = LogFeederUtil.getGson().toJson(configDoc);
-          configMap = (HashMap<String, Object>) LogFeederUtil.toJSONObject(configJson);
-        }
-      }
-    } catch (Exception e) {
-      String logMessageKey = this.getClass().getSimpleName() + "_FETCH_FILTER_CONFIG_ERROR";
-      LogFeederUtil.logErrorMessageByInterval(logMessageKey, "Error getting filter config from solr", e, LOG, Level.ERROR);
-    }
-    return configMap;
-  }
-
-  private QueryResponse process(SolrQuery solrQuery) throws SolrServerException, IOException, SolrException {
-    if (solrClient != null) {
-      QueryResponse queryResponse = solrClient.query(solrQuery, METHOD.POST);
-      return queryResponse;
-    } else {
-      LOG.error("solrClient can't be null");
-      return null;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandler.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandler.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandler.java
deleted file mode 100644
index 0ece637..0000000
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandler.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.ambari.logfeeder.logconfig;
-
-import java.text.DateFormat;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TimeZone;
-
-import org.apache.ambari.logfeeder.common.LogFeederConstants;
-import org.apache.ambari.logfeeder.util.LogFeederUtil;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.log4j.Logger;
-
-public class LogConfigHandler extends Thread {
-  private static final Logger LOG = Logger.getLogger(LogConfigHandler.class);
-  
-  private static final int DEFAULT_SOLR_CONFIG_INTERVAL = 5;
-  private static final String DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS";
-  private static final String TIMEZONE = "GMT";
-  private static final int RETRY_INTERVAL = 30;
-
-  static {
-    TimeZone.setDefault(TimeZone.getTimeZone(TIMEZONE));
-  }
-  
-  private static ThreadLocal<DateFormat> formatter = new ThreadLocal<DateFormat>() {
-    protected DateFormat initialValue() {
-      SimpleDateFormat dateFormat = new SimpleDateFormat(DATE_FORMAT);
-      dateFormat.setTimeZone(TimeZone.getTimeZone(TIMEZONE));
-      return dateFormat;
-    }
-  };
-  
-  private static boolean filterEnabled;
-  private static LogFeederFilterWrapper logFeederFilterWrapper;
-
-  private static boolean running = false;
-
-  public static void handleConfig() {
-    filterEnabled = LogFeederUtil.getBooleanProperty("logfeeder.log.filter.enable", false);
-    if (!filterEnabled) {
-      LOG.info("Logfeeder filter Scheduler is disabled.");
-      return;
-    }
-    if (!running) {
-      new LogConfigHandler().start();
-      running = true;
-      LOG.info("Logfeeder Filter Thread started!");
-    } else {
-      LOG.warn("Logfeeder Filter Thread is already running.");
-    }
-  }
-  
-  private LogConfigHandler() {
-    setName(getClass().getSimpleName());
-    setDaemon(true);
-  }
-
-  @Override
-  public void run() {
-    String zkConnectString = LogFeederUtil.getStringProperty("logfeeder.solr.zk_connect_string");
-    String solrUrl = LogFeederUtil.getStringProperty("logfeeder.solr.url");
-    if (StringUtils.isBlank(zkConnectString) && StringUtils.isBlank(solrUrl)) {
-      LOG.warn("Neither Solr ZK Connect String nor solr Url for UserConfig/History is set." +
-          "Won't look for level configuration from Solr.");
-      return;
-    }
-    
-    int solrConfigInterval = LogFeederUtil.getIntProperty("logfeeder.solr.config.interval", DEFAULT_SOLR_CONFIG_INTERVAL);
-    do {
-      LOG.debug("Updating config from solr after every " + solrConfigInterval + " sec.");
-      fetchConfig();
-      try {
-        Thread.sleep(1000 * solrConfigInterval);
-      } catch (InterruptedException e) {
-        LOG.error(e.getLocalizedMessage(), e.getCause());
-      }
-    } while (true);
-  }
-
-  private synchronized void fetchConfig() {
-    LogConfigFetcher fetcher = LogConfigFetcher.getInstance();
-    if (fetcher != null) {
-      Map<String, Object> configDocMap = fetcher.getConfigDoc();
-      String configJson = (String) configDocMap.get(LogFeederConstants.VALUES);
-      if (configJson != null) {
-        logFeederFilterWrapper = LogFeederUtil.getGson().fromJson(configJson, LogFeederFilterWrapper.class);
-      }
-    }
-  }
-
-  public static boolean isFilterAvailable() {
-    return logFeederFilterWrapper != null;
-  }
-
-  public static List<String> getAllowedLevels(String hostName, LogFeederFilter componentFilter) {
-    String componentName = componentFilter.getLabel();
-    List<String> hosts = componentFilter.getHosts();
-    List<String> defaultLevels = componentFilter.getDefaultLevels();
-    List<String> overrideLevels = componentFilter.getOverrideLevels();
-    String expiryTime = componentFilter.getExpiryTime();
-    
-    // check is user override or not
-    if (StringUtils.isNotEmpty(expiryTime) || CollectionUtils.isNotEmpty(overrideLevels) || CollectionUtils.isNotEmpty(hosts)) {
-      if (CollectionUtils.isEmpty(hosts)) { // hosts list is empty or null consider it apply on all hosts
-        hosts.add(LogFeederConstants.ALL);
-      }
-      
-      if (LogFeederUtil.isListContains(hosts, hostName, false)) {
-        if (isFilterExpired(componentFilter)) {
-          LOG.debug("Filter for component " + componentName + " and host :" + hostName + " is expired at " +
-              componentFilter.getExpiryTime());
-          return defaultLevels;
-        } else {
-          return overrideLevels;
-        }
-      }
-    }
-    return defaultLevels;
-  }
-
-  private static boolean isFilterExpired(LogFeederFilter logfeederFilter) {
-    if (logfeederFilter == null)
-      return false;
-    
-    Date filterEndDate = parseFilterExpireDate(logfeederFilter);
-    if (filterEndDate == null) {
-      return false;
-    }
-    
-    Date currentDate = new Date();
-    if (!currentDate.before(filterEndDate)) {
-      LOG.debug("Filter for  Component :" + logfeederFilter.getLabel() + " and Hosts : [" +
-          StringUtils.join(logfeederFilter.getHosts(), ',') + "] is expired because of filter endTime : " +
-          formatter.get().format(filterEndDate) + " is older than currentTime :" + formatter.get().format(currentDate));
-      return true;
-    } else {
-      return false;
-    }
-  }
-
-  private static Date parseFilterExpireDate(LogFeederFilter vLogfeederFilter) {
-    String expiryTime = vLogfeederFilter.getExpiryTime();
-    if (StringUtils.isNotEmpty(expiryTime)) {
-      try {
-        return formatter.get().parse(expiryTime);
-      } catch (ParseException e) {
-        LOG.error("Filter have invalid ExpiryTime : " + expiryTime + " for component :" + vLogfeederFilter.getLabel()
-          + " and hosts : [" + StringUtils.join(vLogfeederFilter.getHosts(), ',') + "]");
-      }
-    }
-    return null;
-  }
-  
-  public static LogFeederFilter findComponentFilter(String componentName) {
-    waitForFilter();
-    
-    if (logFeederFilterWrapper != null) {
-      HashMap<String, LogFeederFilter> filter = logFeederFilterWrapper.getFilter();
-      if (filter != null) {
-        LogFeederFilter componentFilter = filter.get(componentName);
-        if (componentFilter != null) {
-          return componentFilter;
-        }
-      }
-    }
-    LOG.trace("Filter is not there for component :" + componentName);
-    return null;
-  }
-
-  private static void waitForFilter() {
-    if (!filterEnabled || logFeederFilterWrapper != null) {
-      return;
-    }
-    
-    while (true) {
-      try {
-        Thread.sleep(RETRY_INTERVAL * 1000);
-      } catch (InterruptedException e) {
-        LOG.error(e);
-      }
-      
-      LOG.info("Checking if config is available");
-      if (logFeederFilterWrapper != null) {
-        LOG.info("Config is available");
-        return;
-      }
-    }
-  }
-}
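
For context: the Solr-polling thread deleted above is superseded later in this
patch by push-style callbacks (LogLevelFilterHandler below implements the
LogLevelFilterMonitor interface). A minimal sketch of that callback shape;
every name here is an illustrative stand-in, not the patch's API:

import java.util.HashMap;
import java.util.Map;

public class MonitorDemo {
  interface LevelFilterMonitor {
    void setLogLevelFilter(String logId, Object filter);
    void removeLogLevelFilter(String logId);
  }

  public static void main(String[] args) {
    final Map<String, Object> filters = new HashMap<>();
    LevelFilterMonitor monitor = new LevelFilterMonitor() {
      public void setLogLevelFilter(String logId, Object filter) { filters.put(logId, filter); }
      public void removeLogLevelFilter(String logId) { filters.remove(logId); }
    };
    // The config framework pushes changes through the callbacks;
    // no polling thread or sleep interval is needed.
    monitor.setLogLevelFilter("configured_log_file1", new Object());
    System.out.println(filters.size()); // prints 1
  }
}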

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogFeederFilter.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogFeederFilter.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogFeederFilter.java
deleted file mode 100644
index 60c8ae8..0000000
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogFeederFilter.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.ambari.logfeeder.logconfig;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import org.codehaus.jackson.annotate.JsonAutoDetect;
-import org.codehaus.jackson.annotate.JsonAutoDetect.Visibility;
-import org.codehaus.jackson.map.annotate.JsonSerialize;
-
-@JsonAutoDetect(getterVisibility = Visibility.NONE, setterVisibility = Visibility.NONE, fieldVisibility = Visibility.ANY)
-@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-public class LogFeederFilter {
-
-  private String label;
-  private List<String> hosts;
-  private List<String> defaultLevels;
-  private List<String> overrideLevels;
-  private String expiryTime;
-
-  public LogFeederFilter() {
-    hosts = new ArrayList<String>();
-    defaultLevels = new ArrayList<String>();
-    overrideLevels = new ArrayList<String>();
-  }
-
-  public String getLabel() {
-    return label;
-  }
-
-  public void setLabel(String label) {
-    this.label = label;
-  }
-
-  public List<String> getHosts() {
-    return hosts;
-  }
-
-  public void setHosts(List<String> hosts) {
-    this.hosts = hosts;
-  }
-
-  public List<String> getDefaultLevels() {
-    return defaultLevels;
-  }
-
-  public void setDefaultLevels(List<String> defaultLevels) {
-    this.defaultLevels = defaultLevels;
-  }
-
-  public List<String> getOverrideLevels() {
-    return overrideLevels;
-  }
-
-  public void setOverrideLevels(List<String> overrideLevels) {
-    this.overrideLevels = overrideLevels;
-  }
-
-  public String getExpiryTime() {
-    return expiryTime;
-  }
-
-  public void setExpiryTime(String expiryTime) {
-    this.expiryTime = expiryTime;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogFeederFilterWrapper.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogFeederFilterWrapper.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogFeederFilterWrapper.java
deleted file mode 100644
index 9199cd3..0000000
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/logconfig/LogFeederFilterWrapper.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.ambari.logfeeder.logconfig;
-
-import java.util.HashMap;
-
-import javax.xml.bind.annotation.XmlAccessType;
-import javax.xml.bind.annotation.XmlAccessorType;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import org.codehaus.jackson.annotate.JsonAutoDetect;
-import org.codehaus.jackson.annotate.JsonAutoDetect.Visibility;
-import org.codehaus.jackson.map.annotate.JsonSerialize;
-
-@JsonAutoDetect(getterVisibility = Visibility.NONE, setterVisibility = Visibility.NONE, fieldVisibility = Visibility.ANY)
-@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
-@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
-public class LogFeederFilterWrapper {
-
-  private HashMap<String, LogFeederFilter> filter;
-  private String id;
-
-  public HashMap<String, LogFeederFilter> getFilter() {
-    return filter;
-  }
-
-  public void setFilter(HashMap<String, LogFeederFilter> filter) {
-    this.filter = filter;
-  }
-
-  public String getId() {
-    return id;
-  }
-
-  public void setId(String id) {
-    this.id = id;
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/loglevelfilter/FilterLogData.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/loglevelfilter/FilterLogData.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/loglevelfilter/FilterLogData.java
new file mode 100644
index 0000000..1f635af
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/loglevelfilter/FilterLogData.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logfeeder.loglevelfilter;
+
+import java.util.Map;
+
+import org.apache.ambari.logfeeder.common.LogFeederConstants;
+import org.apache.ambari.logfeeder.input.InputMarker;
+import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.commons.collections.MapUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.log4j.Logger;
+
+public enum FilterLogData {
+  INSTANCE;
+  
+  private static final Logger LOG = Logger.getLogger(FilterLogData.class);
+  
+  private static final boolean DEFAULT_VALUE = true;
+
+  public boolean isAllowed(String jsonBlock, InputMarker inputMarker) {
+    if (StringUtils.isEmpty(jsonBlock)) {
+      return DEFAULT_VALUE;
+    }
+    Map<String, Object> jsonObj = LogFeederUtil.toJSONObject(jsonBlock);
+    return isAllowed(jsonObj, inputMarker);
+  }
+
+  public boolean isAllowed(Map<String, Object> jsonObj, InputMarker inputMarker) {
+    if ("audit".equals(inputMarker.input.getConfigs().get(LogFeederConstants.ROW_TYPE)))
+      return true;
+    
+    boolean isAllowed = applyFilter(jsonObj);
+    if (!isAllowed) {
+      LOG.trace("Filter block the content :" + LogFeederUtil.getGson().toJson(jsonObj));
+    }
+    return isAllowed;
+  }
+
+  private boolean applyFilter(Map<String, Object> jsonObj) {
+    if (MapUtils.isEmpty(jsonObj)) {
+      LOG.warn("Output jsonobj is empty");
+      return DEFAULT_VALUE;
+    }
+    
+    String hostName = (String) jsonObj.get(LogFeederConstants.SOLR_HOST);
+    String logId = (String) jsonObj.get(LogFeederConstants.SOLR_COMPONENT);
+    String level = (String) jsonObj.get(LogFeederConstants.SOLR_LEVEL);
+    if (StringUtils.isNotBlank(hostName) && StringUtils.isNotBlank(logId) && StringUtils.isNotBlank(level)) {
+      return LogLevelFilterHandler.isAllowed(hostName, logId, level);
+    } else {
+      return DEFAULT_VALUE;
+    }
+  }
+}
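
A quick sketch of the enum-singleton pattern FilterLogData uses above: the JVM
creates the INSTANCE constant exactly once, which yields a thread-safe
singleton without explicit synchronization. The names below are hypothetical,
not from the patch:

public class EnumSingletonDemo {
  public enum EventFilter {
    INSTANCE;

    // Enum constants are instantiated once by the JVM, so this is a
    // thread-safe singleton with no locking code.
    public boolean isAllowed(String level) {
      return !"TRACE".equals(level);
    }
  }

  public static void main(String[] args) {
    System.out.println(EventFilter.INSTANCE.isAllowed("INFO"));  // true
    System.out.println(EventFilter.INSTANCE.isAllowed("TRACE")); // false
  }
}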

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/loglevelfilter/LogLevelFilterHandler.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/loglevelfilter/LogLevelFilterHandler.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/loglevelfilter/LogLevelFilterHandler.java
new file mode 100644
index 0000000..8a4d953
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/loglevelfilter/LogLevelFilterHandler.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ambari.logfeeder.loglevelfilter;
+
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.Arrays;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TimeZone;
+
+import org.apache.ambari.logfeeder.common.LogFeederConstants;
+import org.apache.ambari.logfeeder.util.LogFeederUtil;
+import org.apache.ambari.logsearch.config.api.LogLevelFilterMonitor;
+import org.apache.ambari.logsearch.config.api.LogSearchConfig;
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.log4j.Logger;
+
+public class LogLevelFilterHandler implements LogLevelFilterMonitor {
+  private static final Logger LOG = Logger.getLogger(LogLevelFilterHandler.class);
+  
+  private static final String TIMEZONE = "GMT";
+  private static final String DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS";
+  
+  private static ThreadLocal<DateFormat> formatter = new ThreadLocal<DateFormat>() {
+    protected DateFormat initialValue() {
+      SimpleDateFormat dateFormat = new SimpleDateFormat(DATE_FORMAT);
+      dateFormat.setTimeZone(TimeZone.getTimeZone(TIMEZONE));
+      return dateFormat;
+    }
+  };
+  
+  private static LogSearchConfig config;
+  private static String clusterName = LogFeederUtil.getStringProperty("cluster.name");
+  private static boolean filterEnabled;
+  private static List<String> defaultLogLevels;
+  private static Map<String, LogLevelFilter> filters = new HashMap<>();
+
+  public static void init(LogSearchConfig config_) {
+    config = config_;
+    filterEnabled = LogFeederUtil.getBooleanProperty("logfeeder.log.filter.enable", false);
+    defaultLogLevels = Arrays.asList(LogFeederUtil.getStringProperty("logfeeder.include.default.level").split(","));
+    TimeZone.setDefault(TimeZone.getTimeZone(TIMEZONE));
+  }
+
+  @Override
+  public void setLogLevelFilter(String logId, LogLevelFilter logLevelFilter) {
+    synchronized (LogLevelFilterHandler.class) {
+      filters.put(logId, logLevelFilter);
+    }
+  }
+
+  @Override
+  public void removeLogLevelFilter(String logId) {
+    synchronized (LogLevelFilterHandler.class) {
+      filters.remove(logId);
+    }
+  }
+
+  public static boolean isAllowed(String hostName, String logId, String level) {
+    if (!filterEnabled) {
+      return true;
+    }
+    
+    LogLevelFilter logFilter = findLogFilter(logId);
+    List<String> allowedLevels = getAllowedLevels(hostName, logFilter);
+    return allowedLevels.isEmpty() || allowedLevels.contains(level);
+  }
+
+  private static synchronized LogLevelFilter findLogFilter(String logId) {
+    LogLevelFilter logFilter = filters.get(logId);
+    if (logFilter != null) {
+      return logFilter;
+    }
+
+    LOG.info("Filter is not present for log " + logId + ", creating default filter");
+    LogLevelFilter defaultFilter = new LogLevelFilter();
+    defaultFilter.setLabel(logId);
+    defaultFilter.setDefaultLevels(defaultLogLevels);
+
+    try {
+      config.createLogLevelFilter(clusterName, logId, defaultFilter);
+      filters.put(logId, defaultFilter);
+    } catch (Exception e) {
+      LOG.warn("Could not persist the default filter for log " + logId, e);
+    }
+
+    return defaultFilter;
+  }
+
+  private static List<String> getAllowedLevels(String hostName, LogLevelFilter componentFilter) {
+    String componentName = componentFilter.getLabel();
+    List<String> hosts = componentFilter.getHosts();
+    List<String> defaultLevels = componentFilter.getDefaultLevels();
+    List<String> overrideLevels = componentFilter.getOverrideLevels();
+    Date expiryTime = componentFilter.getExpiryTime();
+
+    // check whether the user has overridden the filter levels
+    if (expiryTime != null || CollectionUtils.isNotEmpty(overrideLevels) || CollectionUtils.isNotEmpty(hosts)) {
+      if (CollectionUtils.isEmpty(hosts)) { // hosts list is empty or null; treat the filter as applying to all hosts
+        hosts.add(LogFeederConstants.ALL);
+      }
+
+      if (hosts.contains(LogFeederConstants.ALL) || hosts.contains(hostName)) {
+        if (isFilterExpired(componentFilter)) {
+          LOG.debug("Filter for component " + componentName + " and host :" + hostName + " is expired at " +
+              componentFilter.getExpiryTime());
+          return defaultLevels;
+        } else {
+          return overrideLevels;
+        }
+      }
+    }
+    return defaultLevels;
+  }
+
+  private static boolean isFilterExpired(LogLevelFilter logLevelFilter) {
+    if (logLevelFilter == null)
+      return false;
+
+    Date filterEndDate = logLevelFilter.getExpiryTime();
+    if (filterEndDate == null) {
+      return false;
+    }
+
+    Date currentDate = new Date();
+    if (!currentDate.before(filterEndDate)) {
+      LOG.debug("Filter for  Component :" + logLevelFilter.getLabel() + " and Hosts : [" +
+          StringUtils.join(logLevelFilter.getHosts(), ',') + "] is expired because of filter endTime : " +
+          formatter.get().format(filterEndDate) + " is older than currentTime :" + formatter.get().format(currentDate));
+      return true;
+    } else {
+      return false;
+    }
+  }
+}
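
The allow/deny decision above reduces to: override levels win while the
optional expiry time is unset or still in the future, otherwise the default
levels apply. A self-contained sketch of just that rule, with illustrative
names standing in for the patch's LogLevelFilter fields:

import java.util.Arrays;
import java.util.Date;
import java.util.List;

public class LevelFilterDemo {
  // Overrides are active only while expiry is unset or in the future.
  static List<String> allowedLevels(List<String> defaults, List<String> overrides, Date expiry) {
    boolean overrideActive = overrides != null && !overrides.isEmpty()
        && (expiry == null || new Date().before(expiry));
    return overrideActive ? overrides : defaults;
  }

  public static void main(String[] args) {
    List<String> defaults = Arrays.asList("FATAL", "ERROR", "WARN", "INFO");
    List<String> overrides = Arrays.asList("FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE");
    Date future = new Date(System.currentTimeMillis() + 86400000L); // one day ahead
    Date past = new Date(System.currentTimeMillis() - 86400000L);   // one day ago
    System.out.println(allowedLevels(defaults, overrides, future).contains("DEBUG")); // true
    System.out.println(allowedLevels(defaults, overrides, past).contains("DEBUG"));   // false
  }
}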

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputManager.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputManager.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputManager.java
index 135fb32..ba872f8 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputManager.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/output/OutputManager.java
@@ -29,7 +29,7 @@ import java.util.UUID;
 import org.apache.ambari.logfeeder.common.LogFeederConstants;
 import org.apache.ambari.logfeeder.input.Input;
 import org.apache.ambari.logfeeder.input.InputMarker;
-import org.apache.ambari.logfeeder.logconfig.FilterLogData;
+import org.apache.ambari.logfeeder.loglevelfilter.FilterLogData;
 import org.apache.ambari.logfeeder.metrics.MetricData;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
 import org.apache.ambari.logfeeder.util.MurmurHash;

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/LogFeederUtil.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/LogFeederUtil.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/LogFeederUtil.java
index bb2f0a9..1929178 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/LogFeederUtil.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/util/LogFeederUtil.java
@@ -32,7 +32,6 @@ import java.util.Map;
 import java.util.Properties;
 
 import org.apache.ambari.logfeeder.LogFeeder;
-import org.apache.ambari.logfeeder.common.LogFeederConstants;
 import org.apache.ambari.logfeeder.metrics.MetricData;
 import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang3.StringUtils;
@@ -311,24 +310,6 @@ public class LogFeederUtil {
       return false;
     }
   }
-
-  public static boolean isListContains(List<String> list, String str, boolean caseSensitive) {
-    if (list == null) {
-      return false;
-    }
-    
-    for (String value : list) {
-      if (value == null) {
-        continue;
-      }
-      
-      if (caseSensitive ? value.equals(str) : value.equalsIgnoreCase(str) ||
-          value.equalsIgnoreCase(LogFeederConstants.ALL)) {
-        return true;
-      }
-    }
-    return false;
-  }
   
   private static String logfeederTempDir = null;
   

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandlerTest.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandlerTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandlerTest.java
index 266108f..44314c6 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandlerTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/logconfig/LogConfigHandlerTest.java
@@ -18,8 +18,9 @@
 
 package org.apache.ambari.logfeeder.logconfig;
 
-import java.lang.reflect.Field;
+import java.util.Arrays;
 import java.util.Collections;
+import java.util.Date;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -29,15 +30,17 @@ import static org.junit.Assert.*;
 import org.apache.ambari.logfeeder.common.LogFeederConstants;
 import org.apache.ambari.logfeeder.input.Input;
 import org.apache.ambari.logfeeder.input.InputMarker;
+import org.apache.ambari.logfeeder.loglevelfilter.FilterLogData;
+import org.apache.ambari.logfeeder.loglevelfilter.LogLevelFilterHandler;
 import org.apache.ambari.logfeeder.util.LogFeederUtil;
-import org.junit.AfterClass;
+import org.apache.ambari.logsearch.config.api.LogSearchConfig;
+import org.apache.ambari.logsearch.config.api.model.loglevelfilter.LogLevelFilter;
+import org.apache.commons.lang.time.DateUtils;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 public class LogConfigHandlerTest {
   
-  private static LogConfigFetcher mockFetcher;
-  
   private static InputMarker inputMarkerAudit;
   private static InputMarker inputMarkerService;
   static {
@@ -56,47 +59,41 @@ public class LogConfigHandlerTest {
     replay(auditInput, serviceInput);
   }
   
-  private static final Map<String, Object> CONFIG_MAP = new HashMap<>();
-  static {
-    CONFIG_MAP.put("jsons",
-        "{'filter':{" +
-          "'configured_log_file':{" +
-            "'label':'configured_log_file'," +
-            "'hosts':[]," +
-            "'defaultLevels':['FATAL','ERROR','WARN','INFO']," +
-            "'overrideLevels':[]}," +
-          "'configured_log_file2':{" +
-            "'label':'configured_log_file2'," +
-            "'hosts':['host1']," +
-            "'defaultLevels':['FATAL','ERROR','WARN','INFO']," +
-            "'overrideLevels':['FATAL','ERROR','WARN','INFO','DEBUG','TRACE']," +
-            "'expiryTime':'3000-01-01T00:00:00.000Z'}," +
-          "'configured_log_file3':{" +
-            "'label':'configured_log_file3'," +
-            "'hosts':['host1']," +
-            "'defaultLevels':['FATAL','ERROR','WARN','INFO']," +
-            "'overrideLevels':['FATAL','ERROR','WARN','INFO','DEBUG','TRACE']," +
-            "'expiryTime':'1000-01-01T00:00:00.000Z'}" +
-          "}}");
-  }
-  
   @BeforeClass
   public static void init() throws Exception {
-    mockFetcher = strictMock(LogConfigFetcher.class);
-    Field f = LogConfigFetcher.class.getDeclaredField("instance");
-    f.setAccessible(true);
-    f.set(null, mockFetcher);
-    expect(mockFetcher.getConfigDoc()).andReturn(CONFIG_MAP).anyTimes();
-    replay(mockFetcher);
-    
     LogFeederUtil.loadProperties("logfeeder.properties", null);
-    LogConfigHandler.handleConfig();
-    Thread.sleep(1000);
+    
+    LogSearchConfig config = strictMock(LogSearchConfig.class);
+    config.createLogLevelFilter(anyString(), anyString(), anyObject(LogLevelFilter.class));
+    expectLastCall().anyTimes();
+    LogLevelFilterHandler.init(config);
+    
+    LogLevelFilter logLevelFilter1 = new LogLevelFilter();
+    logLevelFilter1.setHosts(Collections.<String> emptyList());
+    logLevelFilter1.setDefaultLevels(Arrays.asList("FATAL", "ERROR", "WARN", "INFO"));
+    logLevelFilter1.setOverrideLevels(Collections.<String> emptyList());
+    
+    LogLevelFilter logLevelFilter2 = new LogLevelFilter();
+    logLevelFilter2.setHosts(Arrays.asList("host1"));
+    logLevelFilter2.setDefaultLevels(Arrays.asList("FATAL", "ERROR", "WARN", "INFO"));
+    logLevelFilter2.setOverrideLevels(Arrays.asList("FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"));
+    logLevelFilter2.setExpiryTime(DateUtils.addDays(new Date(), 1));
+    
+    LogLevelFilter logLevelFilter3 = new LogLevelFilter();
+    logLevelFilter3.setHosts(Arrays.asList("host1"));
+    logLevelFilter3.setDefaultLevels(Arrays.asList("FATAL", "ERROR", "WARN", "INFO"));
+    logLevelFilter3.setOverrideLevels(Arrays.asList("FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"));
+    logLevelFilter3.setExpiryTime(DateUtils.addDays(new Date(), -1));
+    
+    LogLevelFilterHandler h = new LogLevelFilterHandler();
+    h.setLogLevelFilter("configured_log_file1", logLevelFilter1);
+    h.setLogLevelFilter("configured_log_file2", logLevelFilter2);
+    h.setLogLevelFilter("configured_log_file3", logLevelFilter3);
   }
   
   @Test
   public void testLogConfigHandler_auditAllowed() throws Exception {
-    assertTrue(FilterLogData.INSTANCE.isAllowed("{'host':'host1', 'type':'configured_log_file', 'level':'DEBUG'}",
+    assertTrue(FilterLogData.INSTANCE.isAllowed("{'host':'host1', 'type':'configured_log_file1', 'level':'DEBUG'}",
         inputMarkerAudit));
   }
   
@@ -109,19 +106,25 @@ public class LogConfigHandlerTest {
   
   @Test
   public void testLogConfigHandler_notConfiguredLogAllowed() throws Exception {
-    assertTrue(FilterLogData.INSTANCE.isAllowed("{'host':'host1', 'type':'not_configured_log_file', 'level':'INFO'}",
+    assertTrue(FilterLogData.INSTANCE.isAllowed("{'host':'host1', 'type':'not_configured_log_file1', 'level':'WARN'}",
+        inputMarkerService));
+  }
+  
+  @Test
+  public void testLogConfigHandler_notConfiguredLogNotAllowed() throws Exception {
+    assertFalse(FilterLogData.INSTANCE.isAllowed("{'host':'host1', 'type':'not_configured_log_file1', 'level':'TRACE'}",
         inputMarkerService));
   }
   
   @Test
   public void testLogConfigHandler_configuredDataAllow() throws Exception {
-    assertTrue(FilterLogData.INSTANCE.isAllowed("{'host':'host1', 'type':'configured_log_file', 'level':'INFO'}",
+    assertTrue(FilterLogData.INSTANCE.isAllowed("{'host':'host1', 'type':'configured_log_file1', 'level':'INFO'}",
         inputMarkerService));
   }
   
   @Test
   public void testLogConfigHandler_configuredDataDontAllow() throws Exception {
-    assertFalse(FilterLogData.INSTANCE.isAllowed("{'host':'host1', 'type':'configured_log_file', 'level':'DEBUG'}",
+    assertFalse(FilterLogData.INSTANCE.isAllowed("{'host':'host1', 'type':'configured_log_file1', 'level':'DEBUG'}",
         inputMarkerService));
   }
   
@@ -142,9 +145,4 @@ public class LogConfigHandlerTest {
     assertFalse(FilterLogData.INSTANCE.isAllowed("{'host':'host1', 'type':'configured_log_file3', 'level':'DEBUG'}",
         inputMarkerService));
   }
-  
-  @AfterClass
-  public static void finish() {
-    verify(mockFetcher);
-  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/3b94d3cf/ambari-logsearch/ambari-logsearch-logfeeder/src/test/resources/logfeeder.properties
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/resources/logfeeder.properties b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/resources/logfeeder.properties
index 59020cc..19027d1 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/resources/logfeeder.properties
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/resources/logfeeder.properties
@@ -17,4 +17,5 @@ logfeeder.log.filter.enable=true
 logfeeder.solr.config.interval=5
 logfeeder.solr.zk_connect_string=some_connect_string
 logfeeder.metrics.collector.hosts=some_collector_host
-node.hostname=test_host_name
\ No newline at end of file
+node.hostname=test_host_name
+logfeeder.include.default.level=FATAL,ERROR,WARN
\ No newline at end of file
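
Note the new logfeeder.include.default.level entry: per
LogLevelFilterHandler.init() above, its value is split on commas to seed the
default level list. For illustration only:

import java.util.Arrays;
import java.util.List;

public class LevelListDemo {
  public static void main(String[] args) {
    // How a value such as "FATAL,ERROR,WARN" becomes the default-level list.
    List<String> defaults = Arrays.asList("FATAL,ERROR,WARN".split(","));
    System.out.println(defaults); // prints [FATAL, ERROR, WARN]
  }
}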


[20/25] ambari git commit: AMBARI-20758 Aggregate local metrics for minute aggregation time window (dsen)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py
index c0feed5..e5da9ba 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/controller.py
@@ -27,6 +27,9 @@ from event_definition import HostMetricCollectEvent, ProcessMetricCollectEvent
 from metric_collector import MetricsCollector
 from emitter import Emitter
 from host_info import HostInfo
+from aggregator import Aggregator
+from aggregator import AggregatorWatchdog
+
 
 logger = logging.getLogger()
 
@@ -50,11 +53,15 @@ class Controller(threading.Thread):
     self.initialize_events_cache()
     self.emitter = Emitter(self.config, self.application_metric_map, stop_handler)
     self._t = None
+    self.aggregator = None
+    self.aggregator_watchdog = None
 
   def run(self):
     logger.info('Running Controller thread: %s' % threading.currentThread().getName())
 
     self.start_emitter()
+    if self.config.is_inmemory_aggregation_enabled():
+      self.start_aggregator_with_watchdog()
 
     # Wake every 5 seconds to push events to the queue
     while True:
@@ -62,6 +69,10 @@ class Controller(threading.Thread):
         logger.warn('Event Queue full!! Suspending further collections.')
       else:
         self.enqueque_events()
+      # restart aggregator if needed
+      if self.config.is_inmemory_aggregation_enabled() and not self.aggregator_watchdog.is_ok():
+        logger.warning("Aggregator is not available. Restarting aggregator.")
+        self.start_aggregator_with_watchdog()
       pass
       # Wait for the service stop event instead of sleeping blindly
       if 0 == self._stop_handler.wait(self.sleep_interval):
@@ -75,6 +86,12 @@ class Controller(threading.Thread):
     # The emitter thread should have stopped by now, just ensure it has shut
     # down properly
     self.emitter.join(5)
+
+    if self.config.is_inmemory_aggregation_enabled():
+      self.aggregator.stop()
+      self.aggregator_watchdog.stop()
+      self.aggregator.join(5)
+      self.aggregator_watchdog.join(5)
     pass
 
   # TODO: Optimize to not use Timer class and use the Queue instead
@@ -115,3 +132,14 @@ class Controller(threading.Thread):
 
   def start_emitter(self):
     self.emitter.start()
+
+  # Start (or restart) the aggregator and watchdog threads
+  def start_aggregator_with_watchdog(self):
+    if self.aggregator:
+      self.aggregator.stop()
+    if self.aggregator_watchdog:
+      self.aggregator_watchdog.stop()
+    self.aggregator = Aggregator(self.config, self._stop_handler)
+    self.aggregator_watchdog = AggregatorWatchdog(self.config, self._stop_handler)
+    self.aggregator.start()
+    self.aggregator_watchdog.start()
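
A sketch of the restart-on-failure loop Controller.run() gains above, written
in Java for consistency with the other notes in this series (the real code is
Python, and every name here is illustrative):

public class WatchdogDemo {
  interface Watchdog { boolean isOk(); }

  public static void main(String[] args) throws InterruptedException {
    Watchdog watchdog = () -> Math.random() > 0.2; // stand-in health check
    for (int i = 0; i < 5; i++) {
      if (!watchdog.isOk()) {
        // The patch stops the old Aggregator thread at this point and
        // starts a fresh Aggregator plus AggregatorWatchdog pair.
        System.out.println("Aggregator is not available. Restarting aggregator.");
      }
      Thread.sleep(1000); // the real loop waits on the stop event instead
    }
  }
}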

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
index e2a7f0d..77b8c23 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/emitter.py
@@ -44,10 +44,16 @@ class Emitter(threading.Thread):
     self._stop_handler = stop_handler
     self.application_metric_map = application_metric_map
     self.collector_port = config.get_server_port()
-    self.all_metrics_collector_hosts = config.get_metrics_collector_hosts()
+    self.all_metrics_collector_hosts = config.get_metrics_collector_hosts_as_list()
     self.is_server_https_enabled = config.is_server_https_enabled()
     self.set_instanceid = config.is_set_instanceid()
     self.instanceid = config.get_instanceid()
+    self.is_inmemory_aggregation_enabled = config.is_inmemory_aggregation_enabled()
+
+    if self.is_inmemory_aggregation_enabled:
+      self.collector_port = config.get_inmemory_aggregation_port()
+      self.all_metrics_collector_hosts = ['localhost']
+      self.is_server_https_enabled = False
 
     if self.is_server_https_enabled:
       self.ca_certs = config.get_ca_certs()
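
The Emitter change above redirects metrics to the local aggregator whenever
in-memory aggregation is enabled: plain HTTP to localhost on the aggregation
port (61888 by default elsewhere in this patch) instead of HTTPS to the
collector. A Java sketch of that choice; the method and host names are
illustrative:

public class EndpointDemo {
  static String endpoint(boolean inMemoryAggregation, String collectorHost,
                         int collectorPort, int aggregatorPort, boolean https) {
    if (inMemoryAggregation) {
      // Local aggregator: plain HTTP on localhost, HTTPS disabled.
      return "http://localhost:" + aggregatorPort;
    }
    return (https ? "https" : "http") + "://" + collectorHost + ":" + collectorPort;
  }

  public static void main(String[] args) {
    System.out.println(endpoint(true, "collector.example.com", 6188, 61888, true));
    System.out.println(endpoint(false, "collector.example.com", 6188, 61888, true));
  }
}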

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/stop_handler.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/stop_handler.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/stop_handler.py
index bfb6957..7a9fbec 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/stop_handler.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/stop_handler.py
@@ -117,7 +117,8 @@ class StopHandlerLinux(StopHandler):
 
   def wait(self, timeout=None):
     # Stop process when stop event received
-    if self.stop_event.wait(timeout):
+    self.stop_event.wait(timeout)
+    if self.stop_event.isSet():
       logger.info("Stop event received")
       return 0
     # Timeout
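
A likely motivation for the wait()/isSet() split above: threading.Event.wait()
returns None rather than the flag's value on Python versions before 2.7, so
waiting first and then checking isSet() behaves the same on every interpreter.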

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py
index d218015..53d27f8 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/main.py
@@ -21,7 +21,7 @@ limitations under the License.
 import logging
 import os
 import sys
-
+import signal
 from ambari_commons.os_utils import remove_file
 
 from core.controller import Controller
@@ -73,6 +73,10 @@ def server_process_main(stop_handler, scmStatus=None):
   if scmStatus is not None:
     scmStatus.reportStarted()
 
+  # Keep the main thread suspended waiting for signals: Python delivers
+  # signals such as SIGTERM to the main thread, so keep it parked here
+  # until one arrives.
+  # TODO: find a cleaner approach if possible
+  signal.pause()
+
   #The controller thread finishes when the stop event is signaled
   controller.join()
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java b/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
index 211e9cd..76b1c15 100644
--- a/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
+++ b/ambari-metrics/ambari-metrics-kafka-sink/src/main/java/org/apache/hadoop/metrics2/sink/kafka/KafkaTimelineMetricsReporter.java
@@ -72,6 +72,8 @@ public class KafkaTimelineMetricsReporter extends AbstractTimelineMetricsSink
   private static final String TIMELINE_METRICS_SSL_KEYSTORE_PASSWORD_PROPERTY = TIMELINE_METRICS_KAFKA_PREFIX + SSL_KEYSTORE_PASSWORD_PROPERTY;
   private static final String TIMELINE_METRICS_KAFKA_INSTANCE_ID_PROPERTY = TIMELINE_METRICS_KAFKA_PREFIX + INSTANCE_ID_PROPERTY;
   private static final String TIMELINE_METRICS_KAFKA_SET_INSTANCE_ID_PROPERTY = TIMELINE_METRICS_KAFKA_PREFIX + SET_INSTANCE_ID_PROPERTY;
+  private static final String TIMELINE_METRICS_KAFKA_HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY = TIMELINE_METRICS_KAFKA_PREFIX + HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY;
+  private static final String TIMELINE_METRICS_KAFKA_HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY = TIMELINE_METRICS_KAFKA_PREFIX + HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY;
   private static final String TIMELINE_DEFAULT_HOST = "localhost";
   private static final String TIMELINE_DEFAULT_PORT = "6188";
   private static final String TIMELINE_DEFAULT_PROTOCOL = "http";
@@ -96,6 +98,8 @@ public class KafkaTimelineMetricsReporter extends AbstractTimelineMetricsSink
   private String[] includedMetricsPrefixes;
   // Local cache to avoid prefix matching everytime
   private Set<String> excludedMetrics = new HashSet<>();
+  private boolean hostInMemoryAggregationEnabled;
+  private int hostInMemoryAggregationPort;
 
   @Override
   protected String getCollectorUri(String host) {
@@ -132,6 +136,17 @@ public class KafkaTimelineMetricsReporter extends AbstractTimelineMetricsSink
     return hostname;
   }
 
+
+  @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
   public void setMetricsCache(TimelineMetricsCache metricsCache) {
     this.metricsCache = metricsCache;
   }
@@ -169,6 +184,8 @@ public class KafkaTimelineMetricsReporter extends AbstractTimelineMetricsSink
         instanceId = props.getString(TIMELINE_METRICS_KAFKA_INSTANCE_ID_PROPERTY);
         setInstanceId = props.getBoolean(TIMELINE_METRICS_KAFKA_SET_INSTANCE_ID_PROPERTY);
 
+        hostInMemoryAggregationEnabled = props.getBoolean(TIMELINE_METRICS_KAFKA_HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY, false);
+        hostInMemoryAggregationPort = props.getInt(TIMELINE_METRICS_KAFKA_HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY, 61888);
         setMetricsCache(new TimelineMetricsCache(maxRowCacheSize, metricsSendInterval));
 
         if (metricCollectorProtocol.contains("https")) {

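KafkaTimelineMetricsReporter above (and the Storm sinks below) all follow the
same template-method pattern: AbstractTimelineMetricsSink calls the two new
hooks to decide whether to target the local aggregator. A minimal sketch;
AbstractSink and describeTarget() are stand-ins, and only the two hook
signatures come from the patch:

public class SinkHookDemo {
  abstract static class AbstractSink {
    protected abstract boolean isHostInMemoryAggregationEnabled();
    protected abstract int getHostInMemoryAggregationPort();

    String describeTarget() {
      return isHostInMemoryAggregationEnabled()
          ? "localhost:" + getHostInMemoryAggregationPort()
          : "remote collector";
    }
  }

  public static void main(String[] args) {
    AbstractSink sink = new AbstractSink() {
      protected boolean isHostInMemoryAggregationEnabled() { return true; }
      protected int getHostInMemoryAggregationPort() { return 61888; }
    };
    System.out.println(sink.describeTarget()); // prints localhost:61888
  }
}
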
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java b/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
index 08f0598..24b2c8b 100644
--- a/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
+++ b/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
@@ -55,6 +55,8 @@ public class StormTimelineMetricsReporter extends AbstractTimelineMetricsSink
   private NimbusClient nimbusClient;
   private String applicationId;
   private int timeoutSeconds;
+  private boolean hostInMemoryAggregationEnabled;
+  private int hostInMemoryAggregationPort;
 
   public StormTimelineMetricsReporter() {
 
@@ -96,6 +98,16 @@ public class StormTimelineMetricsReporter extends AbstractTimelineMetricsSink
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
+  @Override
   public void prepare(Map conf) {
     LOG.info("Preparing Storm Metrics Reporter");
     try {
@@ -130,6 +142,8 @@ public class StormTimelineMetricsReporter extends AbstractTimelineMetricsSink
       applicationId = cf.get(APP_ID).toString();
       setInstanceId = Boolean.getBoolean(cf.get(SET_INSTANCE_ID_PROPERTY).toString());
       instanceId = cf.get(INSTANCE_ID_PROPERTY).toString();
+      hostInMemoryAggregationEnabled = Boolean.valueOf(cf.get(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY).toString());
+      hostInMemoryAggregationPort = Integer.valueOf(cf.get(HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY).toString());
 
       collectorUri = constructTimelineMetricUri(protocol, findPreferredCollectHost(), port);
       if (protocol.contains("https")) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
index 20f60e1..c9c0538 100644
--- a/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-storm-sink-legacy/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
@@ -61,6 +61,8 @@ public class StormTimelineMetricsSink extends AbstractTimelineMetricsSink implem
   private String applicationId;
   private boolean setInstanceId;
   private String instanceId;
+  private boolean hostInMemoryAggregationEnabled;
+  private int hostInMemoryAggregationPort;
 
   @Override
   protected String getCollectorUri(String host) {
@@ -98,6 +100,16 @@ public class StormTimelineMetricsSink extends AbstractTimelineMetricsSink implem
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
+  @Override
   public void prepare(Map map, Object o, TopologyContext topologyContext, IErrorReporter iErrorReporter) {
     LOG.info("Preparing Storm Metrics Sink");
     try {
@@ -126,6 +138,8 @@ public class StormTimelineMetricsSink extends AbstractTimelineMetricsSink implem
 
     instanceId = configuration.getProperty(INSTANCE_ID_PROPERTY);
     setInstanceId = Boolean.valueOf(configuration.getProperty(SET_INSTANCE_ID_PROPERTY, "false"));
+    hostInMemoryAggregationEnabled = Boolean.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY));
+    hostInMemoryAggregationPort = Integer.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY));
     // Initialize the collector write strategy
     super.init();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
index 14f160b..5b75065 100644
--- a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
+++ b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsReporter.java
@@ -50,6 +50,8 @@ public class StormTimelineMetricsReporter extends AbstractTimelineMetricsSink
   private String instanceId;
   private String applicationId;
   private int timeoutSeconds;
+  private boolean hostInMemoryAggregationEnabled;
+  private int hostInMemoryAggregationPort;
 
   public StormTimelineMetricsReporter() {
 
@@ -91,6 +93,16 @@ public class StormTimelineMetricsReporter extends AbstractTimelineMetricsSink
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
+  @Override
   public void prepare(Object registrationArgument) {
     LOG.info("Preparing Storm Metrics Reporter");
     try {
@@ -119,6 +131,10 @@ public class StormTimelineMetricsReporter extends AbstractTimelineMetricsSink
       applicationId = configuration.getProperty(CLUSTER_REPORTER_APP_ID, DEFAULT_CLUSTER_REPORTER_APP_ID);
       setInstanceId = Boolean.valueOf(configuration.getProperty(SET_INSTANCE_ID_PROPERTY));
       instanceId = configuration.getProperty(INSTANCE_ID_PROPERTY);
+
+      hostInMemoryAggregationEnabled = Boolean.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY));
+      hostInMemoryAggregationPort = Integer.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY));
+
       if (protocol.contains("https")) {
         String trustStorePath = configuration.getProperty(SSL_KEYSTORE_PATH_PROPERTY).trim();
         String trustStoreType = configuration.getProperty(SSL_KEYSTORE_TYPE_PROPERTY).trim();

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
index 425201c..320e177 100644
--- a/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-storm-sink/src/main/java/org/apache/hadoop/metrics2/sink/storm/StormTimelineMetricsSink.java
@@ -70,6 +70,8 @@ public class StormTimelineMetricsSink extends AbstractTimelineMetricsSink implem
   private String applicationId;
   private String instanceId;
   private boolean setInstanceId;
+  private boolean hostInMemoryAggregationEnabled;
+  private int hostInMemoryAggregationPort;
 
   @Override
   protected String getCollectorUri(String host) {
@@ -107,6 +109,16 @@ public class StormTimelineMetricsSink extends AbstractTimelineMetricsSink implem
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
+  @Override
   public void prepare(Map map, Object o, TopologyContext topologyContext, IErrorReporter iErrorReporter) {
     LOG.info("Preparing Storm Metrics Sink");
     try {
@@ -137,6 +149,10 @@ public class StormTimelineMetricsSink extends AbstractTimelineMetricsSink implem
     port = configuration.getProperty(COLLECTOR_PORT, "6188");
     instanceId = configuration.getProperty(INSTANCE_ID_PROPERTY);
     setInstanceId = Boolean.valueOf(configuration.getProperty(SET_INSTANCE_ID_PROPERTY, "false"));
+
+    hostInMemoryAggregationEnabled = Boolean.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY));
+    hostInMemoryAggregationPort = Integer.valueOf(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY));
+
     // Initialize the collector write strategy
     super.init();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
index c242a2f..f984253 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
@@ -24,10 +24,13 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.AggregationResult;
 import org.apache.hadoop.metrics2.sink.timeline.ContainerMetric;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricMetadata;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricWithAggregatedValues;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
 import org.apache.hadoop.metrics2.sink.timeline.TopNConfig;
 import org.apache.hadoop.service.AbstractService;
@@ -41,6 +44,7 @@ import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery.TimelineMetricMetadataManager;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.Condition;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.ConditionBuilder;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.PhoenixTransactSQL;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.TopNCondition;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.function.SeriesAggregateFunction;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.function.TimelineMetricsSeriesAggregateFunction;
@@ -62,6 +66,7 @@ import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.TIMELINE_METRICS_HOST_INMEMORY_AGGREGATION;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.USE_GROUPBY_AGGREGATOR_QUERIES;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration.DEFAULT_TOPN_HOSTS_LIMIT;
 import static org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.availability.AggregationTaskRunner.ACTUAL_AGGREGATOR_NAMES;
@@ -152,10 +157,14 @@ public class HBaseTimelineMetricStore extends AbstractService implements Timelin
       scheduleAggregatorThread(dailyClusterAggregator);
 
       // Start the minute host aggregator
-      TimelineMetricAggregator minuteHostAggregator =
-        TimelineMetricAggregatorFactory.createTimelineMetricAggregatorMinute(
-          hBaseAccessor, metricsConf, haController);
-      scheduleAggregatorThread(minuteHostAggregator);
+      if (Boolean.parseBoolean(metricsConf.get(TIMELINE_METRICS_HOST_INMEMORY_AGGREGATION, "true"))) {
+        LOG.info("timeline.metrics.host.inmemory.aggregation is set to True, disabling host minute aggregation on collector");
+      } else {
+        TimelineMetricAggregator minuteHostAggregator =
+          TimelineMetricAggregatorFactory.createTimelineMetricAggregatorMinute(
+            hBaseAccessor, metricsConf, haController);
+        scheduleAggregatorThread(minuteHostAggregator);
+      }
 
       // Start the hourly host aggregator
       TimelineMetricAggregator hourlyHostAggregator =
@@ -390,6 +399,18 @@ public class HBaseTimelineMetricStore extends AbstractService implements Timelin
   }
 
   @Override
+  public TimelinePutResponse putHostAggregatedMetrics(AggregationResult aggregationResult) throws SQLException, IOException {
+    Map<TimelineMetric, MetricHostAggregate> aggregateMap = new HashMap<>();
+    for (TimelineMetricWithAggregatedValues entry : aggregationResult.getResult()) {
+      aggregateMap.put(entry.getTimelineMetric(), entry.getMetricAggregate());
+    }
+    hBaseAccessor.saveHostAggregateRecords(aggregateMap, PhoenixTransactSQL.METRICS_AGGREGATE_MINUTE_TABLE_NAME);
+
+    return new TimelinePutResponse();
+  }
+
+  @Override
   public Map<String, Map<String,Set<String>>> getInstanceHostsMetadata(String instanceId, String appId)
           throws SQLException, IOException {
 

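The hunk above makes the collector consult the new flag before scheduling its own minute host aggregator. A minimal sketch of that decision, assuming only hadoop-common's Configuration on the classpath (the class and method names below are illustrative, not part of the patch):

  import org.apache.hadoop.conf.Configuration;

  public class HostAggregationFlagSketch {
    static final String FLAG = "timeline.metrics.host.inmemory.aggregation";

    // Mirrors the check above: when the flag parses as true, the collector
    // skips its minute host aggregator. Note the in-code default here is
    // "true" while ams-site.xml in this same commit ships the property as
    // "false".
    static boolean skipMinuteHostAggregator(Configuration metricsConf) {
      return Boolean.parseBoolean(metricsConf.get(FLAG, "true"));
    }

    public static void main(String[] args) {
      Configuration conf = new Configuration(false);
      conf.set(FLAG, "false");
      System.out.println(skipMinuteHostAggregator(conf)); // prints: false
    }
  }
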
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
index fb369e8..3b2a119 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
@@ -31,6 +31,8 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.RetryCounter;
 import org.apache.hadoop.hbase.util.RetryCounterFactory;
 import org.apache.hadoop.metrics2.sink.timeline.ContainerMetric;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.SingleValuedTimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
@@ -40,8 +42,6 @@ import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.AggregatorUtils;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.Function;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricReadHelper;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery.TimelineMetricMetadataKey;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
index 0d5042f..023465b 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
@@ -296,6 +296,8 @@ public class TimelineMetricConfiguration {
 
   public static final String AMSHBASE_METRICS_WHITESLIST_FILE = "amshbase_metrics_whitelist";
 
+  public static final String TIMELINE_METRICS_HOST_INMEMORY_AGGREGATION = "timeline.metrics.host.inmemory.aggregation";
+
   private Configuration hbaseConf;
   private Configuration metricsConf;
   private Configuration amsEnvConf;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
index bde09cb..d052d54 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricStore.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
 
+import org.apache.hadoop.metrics2.sink.timeline.AggregationResult;
 import org.apache.hadoop.metrics2.sink.timeline.ContainerMetric;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
@@ -80,6 +81,7 @@ public interface TimelineMetricStore {
    */
   Map<String, List<TimelineMetricMetadata>> getTimelineMetricMetadata(String query) throws SQLException, IOException;
 
+  /**
+   * Store pre-aggregated host metrics into the timeline store and return
+   * any errors that occurred while storing.
+   */
+  TimelinePutResponse putHostAggregatedMetrics(AggregationResult aggregationResult) throws SQLException, IOException;
+
   /**
    * Returns all hosts that have written metrics with the apps on the host
    * @return { hostname : [ appIds ] }

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorSink.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorSink.java
index 65d54c0..7b03b30 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorSink.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorSink.java
@@ -19,10 +19,10 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline
 
 import java.util.Map;
 
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 
 /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricAggregate.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricAggregate.java
deleted file mode 100644
index 825ac25..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricAggregate.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
-
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.codehaus.jackson.annotate.JsonProperty;
-import org.codehaus.jackson.annotate.JsonSubTypes;
-import org.codehaus.jackson.map.ObjectMapper;
-
-import java.io.IOException;
-
-/**
-*
-*/
-@JsonSubTypes({@JsonSubTypes.Type(value = MetricClusterAggregate.class),
-  @JsonSubTypes.Type(value = MetricHostAggregate.class)})
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public class MetricAggregate {
-  private static final ObjectMapper mapper = new ObjectMapper();
-
-  protected Double sum = 0.0;
-  protected Double deviation;
-  protected Double max = Double.MIN_VALUE;
-  protected Double min = Double.MAX_VALUE;
-
-  public MetricAggregate() {
-  }
-
-  MetricAggregate(Double sum, Double deviation, Double max,
-                  Double min) {
-    this.sum = sum;
-    this.deviation = deviation;
-    this.max = max;
-    this.min = min;
-  }
-
-  public void updateSum(Double sum) {
-    this.sum += sum;
-  }
-
-  public void updateMax(Double max) {
-    if (max > this.max) {
-      this.max = max;
-    }
-  }
-
-  public void updateMin(Double min) {
-    if (min < this.min) {
-      this.min = min;
-    }
-  }
-
-  @JsonProperty("sum")
-  public Double getSum() {
-    return sum;
-  }
-
-  @JsonProperty("deviation")
-  public Double getDeviation() {
-    return deviation;
-  }
-
-  @JsonProperty("max")
-  public Double getMax() {
-    return max;
-  }
-
-  @JsonProperty("min")
-  public Double getMin() {
-    return min;
-  }
-
-  public void setSum(Double sum) {
-    this.sum = sum;
-  }
-
-  public void setDeviation(Double deviation) {
-    this.deviation = deviation;
-  }
-
-  public void setMax(Double max) {
-    this.max = max;
-  }
-
-  public void setMin(Double min) {
-    this.min = min;
-  }
-
-  public String toJSON() throws IOException {
-    return mapper.writeValueAsString(this);
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricClusterAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricClusterAggregate.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricClusterAggregate.java
deleted file mode 100644
index 9c837b6..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricClusterAggregate.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
-
-
-import org.codehaus.jackson.annotate.JsonCreator;
-import org.codehaus.jackson.annotate.JsonProperty;
-
-/**
-*
-*/
-public class MetricClusterAggregate extends MetricAggregate {
-  private int numberOfHosts;
-
-  @JsonCreator
-  public MetricClusterAggregate() {
-  }
-
-  public MetricClusterAggregate(Double sum, int numberOfHosts, Double deviation,
-                         Double max, Double min) {
-    super(sum, deviation, max, min);
-    this.numberOfHosts = numberOfHosts;
-  }
-
-  @JsonProperty("numberOfHosts")
-  public int getNumberOfHosts() {
-    return numberOfHosts;
-  }
-
-  public void updateNumberOfHosts(int count) {
-    this.numberOfHosts += count;
-  }
-
-  public void setNumberOfHosts(int numberOfHosts) {
-    this.numberOfHosts = numberOfHosts;
-  }
-
-  /**
-   * Find and update min, max and avg for a minute
-   */
-  public void updateAggregates(MetricClusterAggregate hostAggregate) {
-    updateMax(hostAggregate.getMax());
-    updateMin(hostAggregate.getMin());
-    updateSum(hostAggregate.getSum());
-    updateNumberOfHosts(hostAggregate.getNumberOfHosts());
-  }
-
-  @Override
-  public String toString() {
-    return "MetricAggregate{" +
-      "sum=" + sum +
-      ", numberOfHosts=" + numberOfHosts +
-      ", deviation=" + deviation +
-      ", max=" + max +
-      ", min=" + min +
-      '}';
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricHostAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricHostAggregate.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricHostAggregate.java
deleted file mode 100644
index 340ec75..0000000
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/MetricHostAggregate.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
-
-
-import org.codehaus.jackson.annotate.JsonCreator;
-import org.codehaus.jackson.annotate.JsonProperty;
-
-/**
- * Represents a collection of minute based aggregation of values for
- * resolution greater than a minute.
- */
-public class MetricHostAggregate extends MetricAggregate {
-
-  private long numberOfSamples = 0;
-
-  @JsonCreator
-  public MetricHostAggregate() {
-    super(0.0, 0.0, Double.MIN_VALUE, Double.MAX_VALUE);
-  }
-
-  public MetricHostAggregate(Double sum, int numberOfSamples,
-                             Double deviation,
-                             Double max, Double min) {
-    super(sum, deviation, max, min);
-    this.numberOfSamples = numberOfSamples;
-  }
-
-  @JsonProperty("numberOfSamples")
-  public long getNumberOfSamples() {
-    return numberOfSamples == 0 ? 1 : numberOfSamples;
-  }
-
-  public void updateNumberOfSamples(long count) {
-    this.numberOfSamples += count;
-  }
-
-  public void setNumberOfSamples(long numberOfSamples) {
-    this.numberOfSamples = numberOfSamples;
-  }
-
-  public double getAvg() {
-    return sum / numberOfSamples;
-  }
-
-  /**
-   * Find and update min, max and avg for a minute
-   */
-  public void updateAggregates(MetricHostAggregate hostAggregate) {
-    updateMax(hostAggregate.getMax());
-    updateMin(hostAggregate.getMin());
-    updateSum(hostAggregate.getSum());
-    updateNumberOfSamples(hostAggregate.getNumberOfSamples());
-  }
-
-  @Override
-  public String toString() {
-    return "MetricHostAggregate{" +
-      "sum=" + sum +
-      ", numberOfSamples=" + numberOfSamples +
-      ", deviation=" + deviation +
-      ", max=" + max +
-      ", min=" + min +
-      '}';
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAppAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAppAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAppAggregator.java
index 44aca03..9eaf456 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAppAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricAppAggregator.java
@@ -21,6 +21,7 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricMetadata;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricsFilter;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery.TimelineMetricMetadataKey;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregator.java
index 0934356..ba16b43 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregator.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.availability.AggregationTaskRunner.AGGREGATOR_NAME;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.availability.MetricCollectorHAController;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
index a5a3499..34b1f9b 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecond.java
@@ -38,6 +38,7 @@ import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.mutable.MutableInt;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.PostProcessingUtil;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricMetadata;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricHostAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricHostAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricHostAggregator.java
index 0ea9c08..a17433b 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricHostAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricHostAggregator.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.availability.AggregationTaskRunner.AGGREGATOR_NAME;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java
index b5f49fb..672f85f 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricReadHelper.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
 
 
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.SingleValuedTimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java
index 9da921a..50cfb08 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TimelineWebServices.java
@@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.sink.timeline.AggregationResult;
 import org.apache.hadoop.metrics2.sink.timeline.ContainerMetric;
 import org.apache.hadoop.metrics2.sink.timeline.PrecisionLimitExceededException;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricMetadata;
@@ -285,6 +286,36 @@ public class TimelineWebServices {
     }
   }
 
+  /**
+   * Store the given host-aggregated metrics into the timeline store, and
+   * return any errors that occurred while storing.
+   */
+  @Path("/metrics/aggregated")
+  @POST
+  @Consumes({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
+  public TimelinePutResponse postAggregatedMetrics(
+    @Context HttpServletRequest req,
+    @Context HttpServletResponse res,
+    AggregationResult metrics) {
+
+    init(res);
+    if (metrics == null) {
+      return new TimelinePutResponse();
+    }
+
+    try {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Storing aggregated metrics: " +
+                TimelineUtils.dumpTimelineRecordtoJSON(metrics, true));
+      }
+
+      return timelineMetricStore.putHostAggregatedMetrics(metrics);
+    } catch (Exception e) {
+      LOG.error("Error saving metrics.", e);
+      throw new WebApplicationException(e, Response.Status.INTERNAL_SERVER_ERROR);
+    }
+  }
+
   @Path("/containermetrics")
   @POST
   @Consumes({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})

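The new /metrics/aggregated endpoint consumes a JSON-encoded AggregationResult. A hedged client sketch, assuming the stock /ws/v1/timeline class-level path for TimelineWebServices, the usual 6188 collector port, and that an empty "result" array is a valid payload (the host name and JSON field name are assumptions, not taken from the patch):

  import java.io.OutputStream;
  import java.net.HttpURLConnection;
  import java.net.URL;
  import java.nio.charset.StandardCharsets;

  public class PostAggregatedMetricsSketch {
    public static void main(String[] args) throws Exception {
      // Hypothetical collector address; adjust for the actual deployment.
      URL url = new URL(
          "http://ams-collector.example.com:6188/ws/v1/timeline/metrics/aggregated");
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      conn.setRequestMethod("POST");
      conn.setRequestProperty("Content-Type", "application/json");
      conn.setDoOutput(true);
      // Assumed wire shape of AggregationResult: a "result" array of
      // TimelineMetricWithAggregatedValues entries, empty in this sketch.
      byte[] body = "{\"result\": []}".getBytes(StandardCharsets.UTF_8);
      try (OutputStream os = conn.getOutputStream()) {
        os.write(body);
      }
      System.out.println("HTTP " + conn.getResponseCode());
    }
  }
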
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
index 0087fd9..d5baaef 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/ITPhoenixHBaseAccessor.java
@@ -26,12 +26,12 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.metrics2.sink.timeline.ContainerMetric;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.Function;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregator;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregatorFactory;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java
index 37ec134..7eeb9c4 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/MetricTestHelper.java
@@ -17,9 +17,9 @@
  */
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
 
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 
 import java.util.Arrays;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessorTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessorTest.java
index a910cc2..d668178 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessorTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessorTest.java
@@ -22,11 +22,11 @@ import com.google.common.collect.Multimap;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.Function;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.Condition;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestMetricHostAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestMetricHostAggregate.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestMetricHostAggregate.java
index 44f48e8..3009163 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestMetricHostAggregate.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestMetricHostAggregate.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics
   .timeline;
 
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.junit.Test;
 
 import static org.assertj.core.api.Assertions.assertThat;
@@ -34,7 +34,7 @@ public class TestMetricHostAggregate {
     assertThat(aggregate.getSum()).isEqualTo(3.0);
     assertThat(aggregate.getMin()).isEqualTo(1.0);
     assertThat(aggregate.getMax()).isEqualTo(2.0);
-    assertThat(aggregate.getAvg()).isEqualTo(3.0 / 2);
+    assertThat(aggregate.calculateAverage()).isEqualTo(3.0 / 2);
   }
 
   @Test
@@ -50,7 +50,7 @@ public class TestMetricHostAggregate {
     assertThat(aggregate.getSum()).isEqualTo(12.0);
     assertThat(aggregate.getMin()).isEqualTo(0.5);
     assertThat(aggregate.getMax()).isEqualTo(7.5);
-    assertThat(aggregate.getAvg()).isEqualTo((3.0 + 8.0 + 1.0) / 5);
+    assertThat(aggregate.calculateAverage()).isEqualTo((3.0 + 8.0 + 1.0) / 5);
   }
 
   static MetricHostAggregate createAggregate (Double sum, Double min,
@@ -63,4 +63,4 @@ public class TestMetricHostAggregate {
     aggregate.setNumberOfSamples(samplesCount);
     return aggregate;
   }
-}
\ No newline at end of file
+}

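The assertions now call calculateAverage() on the relocated MetricHostAggregate. A sketch of what that method is expected to compute, inferred from the getAvg() and getNumberOfSamples() bodies removed earlier in this commit (the zero-sample guard is an assumption about the relocated implementation):

  public class AverageSketch {
    // Inferred from the removed getAvg(): average = sum / numberOfSamples.
    // The zero-sample guard mirrors the removed getNumberOfSamples().
    static double calculateAverage(double sum, long numberOfSamples) {
      return sum / (numberOfSamples == 0 ? 1 : numberOfSamples);
    }

    public static void main(String[] args) {
      System.out.println(calculateAverage(3.0, 2)); // prints: 1.5
    }
  }
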
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
index f00906e..ac2f9d7 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
 
+import org.apache.hadoop.metrics2.sink.timeline.AggregationResult;
 import org.apache.hadoop.metrics2.sink.timeline.ContainerMetric;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
@@ -92,6 +93,11 @@ public class TestTimelineMetricStore implements TimelineMetricStore {
   }
 
   @Override
+  public TimelinePutResponse putHostAggregatedMetrics(AggregationResult aggregationResult) throws SQLException, IOException {
+    return null;
+  }
+
+  @Override
   public Map<String, Set<String>> getHostAppsMetadata() throws SQLException, IOException {
     return Collections.emptyMap();
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorMemorySink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorMemorySink.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorMemorySink.java
index fa0cfe9..53f6f6c 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorMemorySink.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricsAggregatorMemorySink.java
@@ -17,10 +17,10 @@
  */
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline;
 
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.Precision;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 
 import java.util.Collections;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITClusterAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITClusterAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITClusterAggregator.java
index f083731..07fd85d 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITClusterAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITClusterAggregator.java
@@ -20,13 +20,13 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline
 
 import junit.framework.Assert;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.AbstractMiniHBaseClusterTest;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.MetricTestHelper;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricClusterAggregate;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.MetricHostAggregate;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineClusterMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregator;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregatorFactory;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITMetricAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITMetricAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITMetricAggregator.java
index 9873643..75b3f91 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITMetricAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/ITMetricAggregator.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.AbstractMiniHBaseClusterTest;
@@ -124,14 +125,14 @@ public class ITMetricAggregator extends AbstractMiniHBaseClusterTest {
         assertEquals(0.0, currentHostAggregate.getMin());
         assertEquals(20, currentHostAggregate.getNumberOfSamples());
         assertEquals(15.0, currentHostAggregate.getSum());
-        assertEquals(15.0 / 20, currentHostAggregate.getAvg());
+        assertEquals(15.0 / 20, currentHostAggregate.calculateAverage());
         count++;
       } else if ("mem_free".equals(currentMetric.getMetricName())) {
         assertEquals(2.0, currentHostAggregate.getMax());
         assertEquals(0.0, currentHostAggregate.getMin());
         assertEquals(20, currentHostAggregate.getNumberOfSamples());
         assertEquals(15.0, currentHostAggregate.getSum());
-        assertEquals(15.0 / 20, currentHostAggregate.getAvg());
+        assertEquals(15.0 / 20, currentHostAggregate.calculateAverage());
         count++;
       } else {
         fail("Unexpected entry");
@@ -198,7 +199,7 @@ public class ITMetricAggregator extends AbstractMiniHBaseClusterTest {
         assertEquals(0.0, currentHostAggregate.getMin());
         assertEquals(12 * 20, currentHostAggregate.getNumberOfSamples());
         assertEquals(12 * 15.0, currentHostAggregate.getSum());
-        assertEquals(15.0 / 20, currentHostAggregate.getAvg());
+        assertEquals(15.0 / 20, currentHostAggregate.calculateAverage());
       }
     }
   }
@@ -260,7 +261,7 @@ public class ITMetricAggregator extends AbstractMiniHBaseClusterTest {
         assertEquals(0.0, currentHostAggregate.getMin());
         assertEquals(12 * 20, currentHostAggregate.getNumberOfSamples());
         assertEquals(12 * 15.0, currentHostAggregate.getSum());
-        assertEquals(15.0 / 20, currentHostAggregate.getAvg());
+        assertEquals(15.0 / 20, currentHostAggregate.calculateAverage());
       }
     }
   }
@@ -309,14 +310,14 @@ public class ITMetricAggregator extends AbstractMiniHBaseClusterTest {
         assertEquals(0.0, currentHostAggregate.getMin());
         assertEquals(20, currentHostAggregate.getNumberOfSamples());
         assertEquals(15.0, currentHostAggregate.getSum());
-        assertEquals(15.0 / 20, currentHostAggregate.getAvg());
+        assertEquals(15.0 / 20, currentHostAggregate.calculateAverage());
         count++;
       } else if ("mem_free".equals(currentMetric.getMetricName())) {
         assertEquals(2.0, currentHostAggregate.getMax());
         assertEquals(0.0, currentHostAggregate.getMin());
         assertEquals(20, currentHostAggregate.getNumberOfSamples());
         assertEquals(15.0, currentHostAggregate.getSum());
-        assertEquals(15.0 / 20, currentHostAggregate.getAvg());
+        assertEquals(15.0 / 20, currentHostAggregate.calculateAverage());
         count++;
       } else {
         fail("Unexpected entry");

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java
index 78db11d..6541b2c 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/TimelineMetricClusterAggregatorSecondTest.java
@@ -31,6 +31,7 @@ import java.util.Map;
 import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.sink.timeline.MetricClusterAggregate;
 import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery.TimelineMetricMetadataKey;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.discovery.TimelineMetricMetadataManager;

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/pom.xml b/ambari-metrics/pom.xml
index 2d88912..02f9574 100644
--- a/ambari-metrics/pom.xml
+++ b/ambari-metrics/pom.xml
@@ -33,6 +33,7 @@
     <module>ambari-metrics-host-monitoring</module>
     <module>ambari-metrics-grafana</module>
     <module>ambari-metrics-assembly</module>
+    <module>ambari-metrics-host-aggregator</module>
   </modules>
   <properties>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/java/org/apache/ambari/server/metrics/system/impl/AmbariMetricSinkImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/metrics/system/impl/AmbariMetricSinkImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/metrics/system/impl/AmbariMetricSinkImpl.java
index 8d1f63f..a0765bf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/metrics/system/impl/AmbariMetricSinkImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/metrics/system/impl/AmbariMetricSinkImpl.java
@@ -300,6 +300,16 @@ public class AmbariMetricSinkImpl extends AbstractTimelineMetricsSink implements
     return hostName;
   }
 
+  @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return false;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return 0;
+  }
+
   private List<TimelineMetric> getFilteredMetricList(List<SingleMetric> metrics) {
     final List<TimelineMetric> metricList = new ArrayList<>();
     for (SingleMetric metric : metrics) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
index 150b0a8..5d21514 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
@@ -153,6 +153,8 @@ if has_metric_collector:
   pass
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
 
 # if accumulo is selected accumulo_tserver_hosts should not be empty, but still default just in case
 if 'slave_hosts' in config['clusterHostInfo']:

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2 b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
index 6873c85..742ea3c 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
@@ -16,6 +16,9 @@
 # Poll collectors every {{metrics_report_interval}} seconds
 *.period={{metrics_collection_period}}
 
+*.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
+
 {% if has_metric_collector %}
 
 *.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar

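For reference, with the defaults read in params.py above (metrics_collection_period 10, host_in_memory_aggregation True, port 61888), the sink section of the template renders roughly as:

  *.period=10
  *.host_in_memory_aggregation = True
  *.host_in_memory_aggregation_port = 61888
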
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
index cb66537..4d33661 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
@@ -101,6 +101,14 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>timeline.metrics.host.inmemory.aggregation.jvm.arguments</name>
+    <value>-Xmx256m -Xms128m -XX:PermSize=68m</value>
+    <description>
+      Extra JVM arguments for the local host aggregator, separated by spaces.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
     <name>timeline.metrics.skip.network.interfaces.patterns</name>
     <value>None</value>
     <description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
index 8e1671e..1b085f6 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
@@ -787,4 +787,15 @@
     <value>{{cluster_zookeeper_clientPort}}</value>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>timeline.metrics.host.inmemory.aggregation</name>
+    <value>false</value>
+    <description>If set to "true", host metrics are aggregated in memory on each host.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>timeline.metrics.host.inmemory.aggregation.port</name>
+    <value>61888</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
index 740a91a..9031b46 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
@@ -93,6 +93,9 @@
               <primary>true</primary>
             </log>
           </logs>
+          <configuration-dependencies>
+            <config-type>ams-site</config-type>
+          </configuration-dependencies>
         </component>
 
         <component>

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
index a929847..f49d47d 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
@@ -163,6 +163,20 @@ def ams(name=None):
               create_parents = True
     )
 
+    if params.host_in_memory_aggregation and params.log4j_props is not None:
+      File(os.path.join(params.ams_monitor_conf_dir, "log4j.properties"),
+           owner=params.ams_user,
+           content=params.log4j_props
+           )
+
+    XmlConfig("ams-site.xml",
+              conf_dir=params.ams_monitor_conf_dir,
+              configurations=params.config['configurations']['ams-site'],
+              configuration_attributes=params.config['configuration_attributes']['ams-site'],
+              owner=params.ams_user,
+              group=params.user_group
+              )
+
     TemplateConfig(
       os.path.join(params.ams_monitor_conf_dir, "metric_monitor.ini"),
       owner=params.ams_user,
@@ -366,6 +380,22 @@ def ams(name=None, action=None):
               create_parents = True
     )
 
+    if params.host_in_memory_aggregation and params.log4j_props is not None:
+      File(format("{params.ams_monitor_conf_dir}/log4j.properties"),
+           mode=0644,
+           group=params.user_group,
+           owner=params.ams_user,
+           content=InlineTemplate(params.log4j_props)
+           )
+
+    XmlConfig("ams-site.xml",
+              conf_dir=params.ams_monitor_conf_dir,
+              configurations=params.config['configurations']['ams-site'],
+              configuration_attributes=params.config['configuration_attributes']['ams-site'],
+              owner=params.ams_user,
+              group=params.user_group
+              )
+
     Execute(format("{sudo} chown -R {ams_user}:{user_group} {ams_monitor_log_dir}")
             )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
index 50dde1c..b8c14f4 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
@@ -224,6 +224,11 @@ metrics_collector_heapsize = check_append_heap_property(str(metrics_collector_he
 master_heapsize = check_append_heap_property(str(master_heapsize), "m")
 regionserver_heapsize = check_append_heap_property(str(regionserver_heapsize), "m")
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+host_in_memory_aggregation_jvm_arguments = default("/configurations/ams-env/timeline.metrics.host.inmemory.aggregation.jvm.arguments",
+                                                   "-Xmx256m -Xms128m -XX:PermSize=68m")
+
 regionserver_xmn_max = default('/configurations/ams-hbase-env/hbase_regionserver_xmn_max', None)
 if regionserver_xmn_max:
   regionserver_xmn_max = int(trim_heap_property(str(regionserver_xmn_max), "m"))

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2 b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
index 9729bbe..bb0db4f 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/hadoop-metrics2-hbase.properties.j2
@@ -58,6 +58,9 @@ rpc.protocol={{metric_collector_protocol}}
 
 *.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
 *.sink.timeline.slave.host.name={{hostname}}
+*.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
+
 hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
 hbase.sink.timeline.period={{metrics_collection_period}}
 hbase.sink.timeline.sendInterval={{metrics_report_interval}}000

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2 b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
index 769ad67..b7dee50 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/templates/metric_monitor.ini.j2
@@ -38,3 +38,10 @@ failover_strategy = {{failover_strategy}}
 failover_strategy_blacklisted_interval_seconds = {{failover_strategy_blacklisted_interval_seconds}}
 port = {{metric_collector_port}}
 https_enabled = {{metric_collector_https_enabled}}
+
+[aggregation]
+host_in_memory_aggregation = {{host_in_memory_aggregation}}
+host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
+java_home = {{java64_home}}
+jvm_arguments = {{host_in_memory_aggregation_jvm_arguments}}
+ams_monitor_log_dir = {{ams_monitor_log_dir}}
\ No newline at end of file
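
Rendered with the defaults introduced in this commit, the new [aggregation] block of metric_monitor.ini would come out roughly as below. java_home and the log directory are host-specific, so the values shown for those two keys are placeholders only:

    [aggregation]
    host_in_memory_aggregation = false
    host_in_memory_aggregation_port = 61888
    java_home = /usr/jdk64/jdk1.8.0_112
    jvm_arguments = -Xmx256m -Xms128m -XX:PermSize=68m
    ams_monitor_log_dir = /var/log/ambari-metrics-monitor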

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
index 86a290f..0e0c9aa 100644
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
@@ -124,6 +124,9 @@ if has_metric_collector:
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+
 # Cluster Zookeeper quorum
 zookeeper_quorum = None
 if not len(default("/clusterHostInfo/zookeeper_hosts", [])) == 0:


[22/25] ambari git commit: AMBARI-21046. UI: Upgrades should be started using repo_version_ids instead of version strings (alexantonenko)

Posted by jl...@apache.org.
AMBARI-21046. UI: Upgrades should be started using repo_version_ids instead of version strings (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1568f800
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1568f800
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1568f800

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 1568f800764a6b20f2f09330f112070ebc0f7f86
Parents: 041d353
Author: Alex Antonenko <hi...@gmail.com>
Authored: Wed May 17 19:24:44 2017 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Wed May 17 19:56:45 2017 +0300

----------------------------------------------------------------------
 .../controllers/main/admin/stack_and_upgrade_controller.js    | 7 +++++--
 ambari-web/app/utils/ajax/ajax.js                             | 4 ++--
 .../main/admin/stack_and_upgrade_controller_test.js           | 6 ++++++
 3 files changed, 13 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1568f800/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
index 0f2efb0..d444b2d 100644
--- a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
+++ b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
@@ -414,7 +414,8 @@ App.MainAdminStackAndUpgradeController = Em.Controller.extend(App.LocalStorage,
     if (currentVersion) {
       this.set('currentVersion', {
         repository_version: currentVersion.get('repositoryVersion.repositoryVersion'),
-        repository_name: currentVersion.get('repositoryVersion.displayName')
+        repository_name: currentVersion.get('repositoryVersion.displayName'),
+        id: currentVersion.get('repositoryVersion.id')
       });
     }
   },
@@ -736,6 +737,7 @@ App.MainAdminStackAndUpgradeController = Em.Controller.extend(App.LocalStorage,
         from: App.RepositoryVersion.find().findProperty('displayName', this.get('upgradeVersion')).get('repositoryVersion'),
         value: currentVersion.repository_version,
         label: currentVersion.repository_name,
+        id: currentVersion.id,
         isDowngrade: true,
         upgradeType: this.get('upgradeType')
       },
@@ -1379,7 +1381,8 @@ App.MainAdminStackAndUpgradeController = Em.Controller.extend(App.LocalStorage,
       label: version.get('displayName'),
       type: version.get('upgradeType'),
       skipComponentFailures: version.get('skipComponentFailures') ? 'true' : 'false',
-      skipSCFailures: version.get('skipSCFailures') ? 'true' : 'false'
+      skipSCFailures: version.get('skipSCFailures') ? 'true' : 'false',
+      id: version.get('id')
     };
     if (App.get('supports.preUpgradeCheck')) {
       this.set('requestInProgress', true);

http://git-wip-us.apache.org/repos/asf/ambari/blob/1568f800/ambari-web/app/utils/ajax/ajax.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/ajax/ajax.js b/ambari-web/app/utils/ajax/ajax.js
index d9eeaa6..0b584d8 100644
--- a/ambari-web/app/utils/ajax/ajax.js
+++ b/ambari-web/app/utils/ajax/ajax.js
@@ -1712,7 +1712,7 @@ var urls = {
         timeout : 600000,
         data: JSON.stringify({
           "Upgrade": {
-            "repository_version": data.value,
+            "repository_version_id": data.id,
             "upgrade_type": data.type,
             "skip_failures": data.skipComponentFailures,
             "skip_service_check_failures": data.skipSCFailures,
@@ -1731,7 +1731,7 @@ var urls = {
         data: JSON.stringify({
           "Upgrade": {
             "from_version": data.from,
-            "repository_version": data.value,
+            "repository_version_id": data.id,
             "upgrade_type": data.upgradeType,
             "direction": "DOWNGRADE"
           }
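
For reference, with the illustrative values used in the controller test below, the downgrade request body produced after this change would look like the following:

    {
      "Upgrade": {
        "from_version": "2.3",
        "repository_version_id": "1",
        "upgrade_type": "NON_ROLLING",
        "direction": "DOWNGRADE"
      }
    }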

http://git-wip-us.apache.org/repos/asf/ambari/blob/1568f800/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js b/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
index e696bb1..fa0a0b9 100644
--- a/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
+++ b/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
@@ -128,6 +128,7 @@ describe('App.MainAdminStackAndUpgradeController', function() {
       sinon.stub(App.StackVersion, 'find').returns([Em.Object.create({
         state: 'CURRENT',
         repositoryVersion: {
+          id: '1',
           repositoryVersion: '2.2',
           displayName: 'HDP-2.2'
         }
@@ -155,6 +156,7 @@ describe('App.MainAdminStackAndUpgradeController', function() {
     });
     it('currentVersion is corrent', function () {
       expect(controller.get('currentVersion')).to.eql({
+        "id": "1",
         "repository_version": "2.2",
         "repository_name": "HDP-2.2"
       });
@@ -389,6 +391,7 @@ describe('App.MainAdminStackAndUpgradeController', function() {
   describe("#runPreUpgradeCheck()", function() {
     it("make ajax call", function() {
       controller.runPreUpgradeCheck(Em.Object.create({
+        id: '1',
         repositoryVersion: '2.2',
         displayName: 'HDP-2.2',
         upgradeType: 'ROLLING',
@@ -399,6 +402,7 @@ describe('App.MainAdminStackAndUpgradeController', function() {
       expect(args[0]).to.exists;
       expect(args[0].sender).to.be.eql(controller);
       expect(args[0].data).to.be.eql({
+        id: '1',
         value: '2.2',
         label: 'HDP-2.2',
         type: 'ROLLING',
@@ -1126,6 +1130,7 @@ describe('App.MainAdminStackAndUpgradeController', function() {
       controller.set('upgradeVersion', 'HDP-2.3');
       controller.set('upgradeType', 'NON_ROLLING');
       controller.startDowngrade(Em.Object.create({
+        id: '1',
         repository_version: '2.2',
         repository_name: 'HDP-2.2'
       }));
@@ -1139,6 +1144,7 @@ describe('App.MainAdminStackAndUpgradeController', function() {
     it('request-data is valid', function () {
       expect(this.callArgs.data).to.eql({
         from: '2.3',
+        id: '1',
         value: '2.2',
         label: 'HDP-2.2',
         isDowngrade: true,


[17/25] ambari git commit: AMBARI-21037 When adding services, new versions of configs get added for the config types associated with config groups. (atkach)

Posted by jl...@apache.org.
AMBARI-21037 When adding services, new versions of configs get added for the config types associated with config groups. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/735c4137
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/735c4137
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/735c4137

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 735c4137cb2cbb95c6da44809a447c148c7c6f9d
Parents: 4427a33
Author: Andrii Tkach <at...@apache.org>
Authored: Wed May 17 13:57:49 2017 +0300
Committer: Andrii Tkach <at...@apache.org>
Committed: Wed May 17 18:15:20 2017 +0300

----------------------------------------------------------------------
 ambari-web/app/controllers/wizard.js            |  6 ++--
 .../app/controllers/wizard/step7_controller.js  |  1 +
 .../app/mixins/common/configs/configs_saver.js  | 32 ++++++++++++++------
 .../mixins/common/configs/configs_saver_test.js | 13 ++++++++
 4 files changed, 39 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/735c4137/ambari-web/app/controllers/wizard.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard.js b/ambari-web/app/controllers/wizard.js
index c3a54cf..a8a0249 100644
--- a/ambari-web/app/controllers/wizard.js
+++ b/ambari-web/app/controllers/wizard.js
@@ -1031,16 +1031,16 @@ App.WizardController = Em.Controller.extend(App.LocalStorage, App.ThemesMappingM
           })
         });
         //configGroup copied into plain JS object to avoid Converting circular structure to JSON
-        var hostNames = configGroup.get('hosts').map(function(host_name) {return hosts[host_name].id;});
+        var hostIds = configGroup.get('hosts').map(function(host_name) {return hosts[host_name].id;});
         serviceConfigGroups.push({
           id: configGroup.get('id'),
           name: configGroup.get('name'),
           description: configGroup.get('description'),
-          hosts: hostNames.slice(),
+          hosts: hostIds.slice(),
           properties: properties.slice(),
           is_default: configGroup.get('isDefault'),
           is_for_installed_service: isForInstalledService,
-          is_for_update: configGroup.isForUpdate || configGroup.get('hash') != this.getConfigGroupHash(configGroup, hostNames),
+          is_for_update: configGroup.get('isForUpdate') || configGroup.get('hash') !== this.getConfigGroupHash(configGroup, configGroup.get('hosts')),
           service_name: configGroup.get('serviceName'),
           service_id: configGroup.get('serviceName'),
           desired_configs: configGroup.get('desiredConfigs'),

http://git-wip-us.apache.org/repos/asf/ambari/blob/735c4137/ambari-web/app/controllers/wizard/step7_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step7_controller.js b/ambari-web/app/controllers/wizard/step7_controller.js
index 8e14b70..6685c01 100644
--- a/ambari-web/app/controllers/wizard/step7_controller.js
+++ b/ambari-web/app/controllers/wizard/step7_controller.js
@@ -1186,6 +1186,7 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
               this.get('stepConfigs').findProperty('serviceName', service.serviceName).get('configs').pushObject(overriddenSCP);
             }
           }, this);
+          modelGroup.set('hash', this.get('wizardController').getConfigGroupHash(modelGroup));
         }, this);
         service.set('configGroups', App.ServiceConfigGroup.find().filterProperty('serviceName', service.get('serviceName')));
       }

http://git-wip-us.apache.org/repos/asf/ambari/blob/735c4137/ambari-web/app/mixins/common/configs/configs_saver.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/configs/configs_saver.js b/ambari-web/app/mixins/common/configs/configs_saver.js
index 7d8721d..4a4163e 100644
--- a/ambari-web/app/mixins/common/configs/configs_saver.js
+++ b/ambari-web/app/mixins/common/configs/configs_saver.js
@@ -368,10 +368,10 @@ App.ConfigsSaverMixin = Em.Mixin.create({
   /*********************************** 3. GENERATING JSON TO SAVE *****************************/
 
   /**
-   * Map that contains last used timestamp per filename.
+   * Map that contains last used timestamp.
    * There is a case when two config groups can update same filename almost simultaneously
-   * so they have equal timestamp only and this causes collision. So to prevent this we need to check
-   * if specific filename with specific timestamp is not saved yet
+   * so they have equal timestamp and this causes collision. So to prevent this we need to check
+   * if specific filename with specific timestamp is not saved yet.
    *
    * @type {Object}
    */
@@ -389,14 +389,9 @@ App.ConfigsSaverMixin = Em.Mixin.create({
     var desired_config = [];
     if (Em.isArray(configsToSave) && Em.isArray(fileNamesToSave) && fileNamesToSave.length && configsToSave.length) {
       serviceConfigNote = serviceConfigNote || "";
-      var tagVersion = "version" + (new Date).getTime();
-      fileNamesToSave.forEach(function(fName) {
 
-        /** @see <code>_timeStamps<code> **/
-        if (this.get('_timeStamps')[fName] === tagVersion) {
-          tagVersion = "version" + ((new Date).getTime() + 1);
-        }
-        this.get('_timeStamps')[fName] = tagVersion;
+      fileNamesToSave.forEach(function(fName) {
+        var tagVersion = this.getUniqueTag();
 
         if (this.allowSaveSite(fName)) {
           var properties = configsToSave.filterProperty('filename', fName);
@@ -409,6 +404,23 @@ App.ConfigsSaverMixin = Em.Mixin.create({
   },
 
   /**
+   * generate unique tag
+   * @returns {string}
+   */
+  getUniqueTag: function() {
+    var timestamp = (new Date).getTime();
+    var tagVersion = "version" + timestamp;
+
+    while(this.get('_timeStamps')[tagVersion]) {
+      timestamp++;
+      tagVersion = "version" + timestamp;
+    }
+    /** @see <code>_timeStamps<code> **/
+    this.get('_timeStamps')[tagVersion] = true;
+    return tagVersion;
+  },
+
+  /**
    * For some file names we have a restriction
    * and can't save them, in this case method will return false
    *
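
The collision-avoidance idea behind getUniqueTag is language-agnostic: keep bumping the millisecond timestamp until the derived tag has not been handed out yet. A standalone Java sketch of the same technique, for illustration only (the JavaScript above is the actual change):

    import java.util.HashSet;
    import java.util.Set;

    /** Sketch of getUniqueTag's collision avoidance: bump the timestamp
     *  until "version" + timestamp has not been returned before. */
    public class UniqueTagGenerator {
      private final Set<String> used = new HashSet<>();

      public synchronized String next() {
        long ts = System.currentTimeMillis();
        String tag = "version" + ts;
        while (!used.add(tag)) {      // add() returns false if the tag is taken
          tag = "version" + (++ts);
        }
        return tag;
      }
    }

Here a Set plays the role of the mixin's _timeStamps map, which stores each issued tag as a key.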

http://git-wip-us.apache.org/repos/asf/ambari/blob/735c4137/ambari-web/test/mixins/common/configs/configs_saver_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/mixins/common/configs/configs_saver_test.js b/ambari-web/test/mixins/common/configs/configs_saver_test.js
index 6e65cf9..7815938 100644
--- a/ambari-web/test/mixins/common/configs/configs_saver_test.js
+++ b/ambari-web/test/mixins/common/configs/configs_saver_test.js
@@ -259,6 +259,19 @@ describe('App.ConfigsSaverMixin', function() {
     })
   });
 
+  describe('#getUniqueTag', function() {
+
+    it('should generate unique tags', function() {
+      var tags = [];
+      for (var i = 0; i < 3; i++) {
+        tags.push(mixin.getUniqueTag());
+      }
+      expect(tags[1]).to.not.be.equal(tags[0]);
+      expect(tags[2]).to.not.be.equal(tags[1]);
+      expect(tags[0]).to.not.be.equal(tags[2]);
+    });
+  });
+
   describe('#getModifiedConfigs', function () {
     var configs = [
       Em.Object.create({


[10/25] ambari git commit: AMBARI-21028. The credential cache for livy is messed up (Weiqing Yang via smohanty)

Posted by jl...@apache.org.
AMBARI-21028. The credential cache for livy is messed up (Weiqing Yang via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/704eb525
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/704eb525
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/704eb525

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 704eb5254545bcdb8b7a9a60b2518124e7e4b265
Parents: 9adffcf
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Tue May 16 19:14:35 2017 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Tue May 16 19:15:45 2017 -0700

----------------------------------------------------------------------
 .../SPARK/1.2.1/package/scripts/alerts/alert_spark_livy_port.py    | 2 +-
 .../SPARK2/2.0.0/package/scripts/alerts/alert_spark2_livy_port.py  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/704eb525/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_livy_port.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_livy_port.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_livy_port.py
index 746a98e..f15e747 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_livy_port.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.1/package/scripts/alerts/alert_spark_livy_port.py
@@ -82,7 +82,7 @@ def execute(configurations={}, parameters={}, host_name=None):
     if host_name is None:
         host_name = socket.getfqdn()
 
-    livyuser = LIVYUSER_DEFAULT
+    livyuser = configurations[SMOKEUSER_KEY]
 
     security_enabled = False
     if SECURITY_ENABLED_KEY in configurations:

http://git-wip-us.apache.org/repos/asf/ambari/blob/704eb525/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_livy_port.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_livy_port.py b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_livy_port.py
index 44c284f..d69f663 100644
--- a/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_livy_port.py
+++ b/ambari-server/src/main/resources/common-services/SPARK2/2.0.0/package/scripts/alerts/alert_spark2_livy_port.py
@@ -82,7 +82,7 @@ def execute(configurations={}, parameters={}, host_name=None):
     if host_name is None:
         host_name = socket.getfqdn()
 
-    livyuser = LIVYUSER_DEFAULT
+    livyuser = configurations[SMOKEUSER_KEY]
 
     security_enabled = False
     if SECURITY_ENABLED_KEY in configurations:


[03/25] ambari git commit: AMBARI-21025. Avoid unnecessary calls to load stage summaries (magyari_sandor)

Posted by jl...@apache.org.
AMBARI-21025. Avoid unnecessary calls to load stage summaries (magyari_sandor)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8cebc18b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8cebc18b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8cebc18b

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 8cebc18b82e3e9df0c440675f26f8e6be32ebf06
Parents: b97d268
Author: Sandor Magyari <sm...@hortonworks.com>
Authored: Wed May 10 16:22:07 2017 +0200
Committer: Sandor Magyari <sm...@hortonworks.com>
Committed: Tue May 16 13:59:08 2017 +0200

----------------------------------------------------------------------
 .../internal/StageResourceProvider.java         | 81 +++-----------------
 1 file changed, 10 insertions(+), 71 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8cebc18b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
index db9a0e2..ec3688d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
@@ -211,8 +211,7 @@ public class StageResourceProvider extends AbstractControllerResourceProvider im
 
     // !!! poor mans cache.  toResource() shouldn't be calling the db
     // every time, when the request id is likely the same for each stageEntity
-    Map<Long, Map<Long, HostRoleCommandStatusSummaryDTO>> cache =
-      new HashMap<>();
+    Map<Long, Map<Long, HostRoleCommandStatusSummaryDTO>> cache = new HashMap<>();
 
     List<StageEntity> entities = dao.findAll(request, predicate);
     for (StageEntity entity : entities) {
@@ -232,8 +231,11 @@ public class StageResourceProvider extends AbstractControllerResourceProvider im
 
       if (null != lr) {
         Collection<StageEntity> topologyManagerStages = lr.getStageEntities();
+        // preload summaries as it contains summaries for all stages within this request
+        Map<Long, HostRoleCommandStatusSummaryDTO> summary = topologyManager.getStageSummaries(requestId);
+        cache.put(requestId, summary);
         for (StageEntity entity : topologyManagerStages) {
-          Resource stageResource = toResource(entity, propertyIds);
+          Resource stageResource = toResource(cache, entity, propertyIds);
           if (predicate.evaluate(stageResource)) {
             results.add(stageResource);
           }
@@ -242,7 +244,11 @@ public class StageResourceProvider extends AbstractControllerResourceProvider im
     } else {
       Collection<StageEntity> topologyManagerStages = topologyManager.getStages();
       for (StageEntity entity : topologyManagerStages) {
-        Resource stageResource = toResource(entity, propertyIds);
+        if (!cache.containsKey(entity.getRequestId())) {
+          Map<Long, HostRoleCommandStatusSummaryDTO> summary = topologyManager.getStageSummaries(entity.getRequestId());
+          cache.put(entity.getRequestId(), summary);
+        }
+        Resource stageResource = toResource(cache, entity, propertyIds);
         if (predicate.evaluate(stageResource)) {
           results.add(stageResource);
         }
@@ -351,71 +357,4 @@ public class StageResourceProvider extends AbstractControllerResourceProvider im
     return resource;
   }
 
-  /**
-   * Converts the {@link StageEntity} to a {@link Resource}.
-   *
-   * @param entity        the entity to convert (not {@code null})
-   * @param requestedIds  the properties requested (not {@code null})
-   *
-   * @return the new resource
-   */
-  //todo: almost exactly the same as other toResource except how summaries are obtained
-  //todo: refactor to combine the two with the summary logic extracted
-  private Resource toResource(StageEntity entity, Set<String> requestedIds) {
-
-    Resource resource = new ResourceImpl(Resource.Type.Stage);
-
-    Long clusterId = entity.getClusterId();
-    if (clusterId != null && !clusterId.equals(Long.valueOf(-1L))) {
-      try {
-        Cluster cluster = clustersProvider.get().getClusterById(clusterId);
-
-        setResourceProperty(resource, STAGE_CLUSTER_NAME, cluster.getClusterName(), requestedIds);
-      } catch (Exception e) {
-        LOG.error("Can not get information for cluster " + clusterId + ".", e );
-      }
-    }
-
-    Map<Long, HostRoleCommandStatusSummaryDTO> summary =
-        topologyManager.getStageSummaries(entity.getRequestId());
-
-    setResourceProperty(resource, STAGE_STAGE_ID, entity.getStageId(), requestedIds);
-    setResourceProperty(resource, STAGE_REQUEST_ID, entity.getRequestId(), requestedIds);
-    setResourceProperty(resource, STAGE_CONTEXT, entity.getRequestContext(), requestedIds);
-
-    // this property is lazy loaded in JPA; don't use it unless requested
-    if (isPropertyRequested(STAGE_CLUSTER_HOST_INFO, requestedIds)) {
-      resource.setProperty(STAGE_CLUSTER_HOST_INFO, entity.getClusterHostInfo());
-    }
-
-    // this property is lazy loaded in JPA; don't use it unless requested
-    if (isPropertyRequested(STAGE_COMMAND_PARAMS, requestedIds)) {
-      resource.setProperty(STAGE_COMMAND_PARAMS, entity.getCommandParamsStage());
-    }
-
-    // this property is lazy loaded in JPA; don't use it unless requested
-    if (isPropertyRequested(STAGE_HOST_PARAMS, requestedIds)) {
-      resource.setProperty(STAGE_HOST_PARAMS, entity.getHostParamsStage());
-    }
-
-    setResourceProperty(resource, STAGE_SKIPPABLE, entity.isSkippable(), requestedIds);
-
-    Long startTime = Long.MAX_VALUE;
-    Long endTime = 0L;
-    if (summary.containsKey(entity.getStageId())) {
-      startTime = summary.get(entity.getStageId()).getStartTime();
-      endTime = summary.get(entity.getStageId()).getEndTime();
-    }
-
-    setResourceProperty(resource, STAGE_START_TIME, startTime, requestedIds);
-    setResourceProperty(resource, STAGE_END_TIME, endTime, requestedIds);
-
-    CalculatedStatus status = CalculatedStatus.statusFromStageSummary(summary, Collections.singleton(entity.getStageId()));
-
-    setResourceProperty(resource, STAGE_PROGRESS_PERCENT, status.getPercent(), requestedIds);
-    setResourceProperty(resource, STAGE_STATUS, status.getStatus(), requestedIds);
-    setResourceProperty(resource, STAGE_DISPLAY_STATUS, status.getDisplayStatus(), requestedIds);
-
-    return resource;
-  }
 }
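
The pattern applied here is plain per-request memoization: getStageSummaries() is called at most once per request id, and every stage belonging to that request reuses the cached map. A standalone Java sketch of the idea, with generic types standing in for the Ambari ones (illustration only, not the patch):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Function;

    /** "Poor man's cache": compute a value at most once per key and reuse it. */
    public class PerRequestCache<K, V> {
      private final Map<K, V> cache = new HashMap<>();
      private final Function<K, V> loader;  // e.g. topologyManager::getStageSummaries

      public PerRequestCache(Function<K, V> loader) {
        this.loader = loader;
      }

      public V get(K requestId) {
        // computeIfAbsent collapses the explicit containsKey/put dance in the diff
        return cache.computeIfAbsent(requestId, loader);
      }
    }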


[21/25] ambari git commit: AMBARI-20758 Aggregate local metrics for minute aggregation time window (dsen)

Posted by jl...@apache.org.
AMBARI-20758 Aggregate local metrics for minute aggregation time window (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/041d353b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/041d353b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/041d353b

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 041d353b0d75b20b0322097e13a1701226e6fc97
Parents: 772be78
Author: Dmytro Sen <ds...@apache.org>
Authored: Wed May 17 19:38:29 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Wed May 17 19:38:29 2017 +0300

----------------------------------------------------------------------
 .../logfeeder/metrics/LogFeederAMSClient.java   |  12 +-
 ambari-metrics/ambari-metrics-assembly/pom.xml  |  20 +++
 .../src/main/assembly/monitor-windows.xml       |   7 +
 .../src/main/assembly/monitor.xml               |   9 +-
 .../timeline/AbstractTimelineMetricsSink.java   |  24 ++-
 .../sink/timeline/AggregationResult.java        |  60 +++++++
 .../metrics2/sink/timeline/MetricAggregate.java | 110 ++++++++++++
 .../sink/timeline/MetricClusterAggregate.java   |  73 ++++++++
 .../sink/timeline/MetricHostAggregate.java      |  81 +++++++++
 .../metrics2/sink/timeline/TimelineMetric.java  |   6 +-
 .../TimelineMetricWithAggregatedValues.java     |  65 +++++++
 .../AbstractTimelineMetricSinkTest.java         |  10 ++
 .../availability/MetricCollectorHATest.java     |  10 ++
 .../cache/HandleConnectExceptionTest.java       |  10 ++
 .../sink/flume/FlumeTimelineMetricsSink.java    |  16 ++
 .../timeline/HadoopTimelineMetricsSink.java     |  20 ++-
 .../conf/unix/log4j.properties                  |  31 ++++
 .../conf/windows/log4j.properties               |  29 +++
 .../ambari-metrics-host-aggregator/pom.xml      | 120 +++++++++++++
 .../AbstractMetricPublisherThread.java          | 134 ++++++++++++++
 .../aggregator/AggregatedMetricsPublisher.java  | 101 +++++++++++
 .../host/aggregator/AggregatorApplication.java  | 180 +++++++++++++++++++
 .../host/aggregator/AggregatorWebService.java   |  56 ++++++
 .../host/aggregator/RawMetricsPublisher.java    |  60 +++++++
 .../host/aggregator/TimelineMetricsHolder.java  |  98 ++++++++++
 .../conf/unix/ambari-metrics-monitor            |   2 +-
 .../src/main/python/core/aggregator.py          | 110 ++++++++++++
 .../src/main/python/core/config_reader.py       |  35 +++-
 .../src/main/python/core/controller.py          |  28 +++
 .../src/main/python/core/emitter.py             |   8 +-
 .../src/main/python/core/stop_handler.py        |   3 +-
 .../src/main/python/main.py                     |   6 +-
 .../kafka/KafkaTimelineMetricsReporter.java     |  17 ++
 .../storm/StormTimelineMetricsReporter.java     |  14 ++
 .../sink/storm/StormTimelineMetricsSink.java    |  14 ++
 .../storm/StormTimelineMetricsReporter.java     |  16 ++
 .../sink/storm/StormTimelineMetricsSink.java    |  16 ++
 .../timeline/HBaseTimelineMetricStore.java      |  29 ++-
 .../metrics/timeline/PhoenixHBaseAccessor.java  |   4 +-
 .../timeline/TimelineMetricConfiguration.java   |   2 +
 .../metrics/timeline/TimelineMetricStore.java   |   2 +
 .../timeline/TimelineMetricsAggregatorSink.java |   4 +-
 .../timeline/aggregators/MetricAggregate.java   | 110 ------------
 .../aggregators/MetricClusterAggregate.java     |  73 --------
 .../aggregators/MetricHostAggregate.java        |  81 ---------
 .../TimelineMetricAppAggregator.java            |   1 +
 .../TimelineMetricClusterAggregator.java        |   2 +
 .../TimelineMetricClusterAggregatorSecond.java  |   1 +
 .../TimelineMetricHostAggregator.java           |   1 +
 .../aggregators/TimelineMetricReadHelper.java   |   2 +
 .../webapp/TimelineWebServices.java             |  31 ++++
 .../timeline/ITPhoenixHBaseAccessor.java        |   4 +-
 .../metrics/timeline/MetricTestHelper.java      |   2 +-
 .../timeline/PhoenixHBaseAccessorTest.java      |   4 +-
 .../timeline/TestMetricHostAggregate.java       |   8 +-
 .../timeline/TestTimelineMetricStore.java       |   6 +
 .../TimelineMetricsAggregatorMemorySink.java    |   4 +-
 .../aggregators/ITClusterAggregator.java        |   4 +-
 .../aggregators/ITMetricAggregator.java         |  13 +-
 ...melineMetricClusterAggregatorSecondTest.java |   1 +
 ambari-metrics/pom.xml                          |   1 +
 .../system/impl/AmbariMetricSinkImpl.java       |  10 ++
 .../1.6.1.2.2.0/package/scripts/params.py       |   2 +
 .../hadoop-metrics2-accumulo.properties.j2      |   3 +
 .../0.1.0/configuration/ams-env.xml             |   8 +
 .../0.1.0/configuration/ams-site.xml            |  11 ++
 .../AMBARI_METRICS/0.1.0/metainfo.xml           |   3 +
 .../AMBARI_METRICS/0.1.0/package/scripts/ams.py |  30 ++++
 .../0.1.0/package/scripts/params.py             |   5 +
 .../hadoop-metrics2-hbase.properties.j2         |   3 +
 .../package/templates/metric_monitor.ini.j2     |   7 +
 .../FLUME/1.4.0.2.0/package/scripts/params.py   |   3 +
 .../templates/flume-metrics2.properties.j2      |   2 +
 .../0.96.0.2.0/package/scripts/params_linux.py  |   3 +
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 |   2 +
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 |   2 +
 .../hadoop-metrics2.properties.xml              |   2 +
 .../0.12.0.2.0/package/scripts/params_linux.py  |   2 +
 .../hadoop-metrics2-hivemetastore.properties.j2 |   2 +
 .../hadoop-metrics2-hiveserver2.properties.j2   |   2 +
 .../templates/hadoop-metrics2-llapdaemon.j2     |   2 +
 .../hadoop-metrics2-llaptaskscheduler.j2        |   2 +
 .../2.1.0.3.0/package/scripts/params_linux.py   |   3 +
 .../hadoop-metrics2-hivemetastore.properties.j2 |   2 +
 .../hadoop-metrics2-hiveserver2.properties.j2   |   2 +
 .../templates/hadoop-metrics2-llapdaemon.j2     |   2 +
 .../hadoop-metrics2-llaptaskscheduler.j2        |   2 +
 .../KAFKA/0.8.1/configuration/kafka-broker.xml  |  11 ++
 .../KAFKA/0.8.1/package/scripts/params.py       |   3 +
 .../STORM/0.9.1/package/scripts/params_linux.py |   2 +
 .../0.9.1/package/templates/config.yaml.j2      |   2 +
 .../templates/storm-metrics2.properties.j2      |   2 +
 .../2.0.6/hooks/before-START/scripts/params.py  |   3 +
 .../templates/hadoop-metrics2.properties.j2     |   2 +
 .../hadoop-metrics2.properties.xml              |   2 +
 .../3.0/hooks/before-START/scripts/params.py    |   2 +
 .../templates/hadoop-metrics2.properties.j2     |   2 +
 .../system/impl/TestAmbariMetricsSinkImpl.java  |  10 ++
 .../2.0/hooks/before-START/scripts/params.py    |   2 +
 99 files changed, 1854 insertions(+), 307 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/LogFeederAMSClient.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/LogFeederAMSClient.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/LogFeederAMSClient.java
index 2d1bf40..39526a5 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/LogFeederAMSClient.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/LogFeederAMSClient.java
@@ -89,6 +89,16 @@ public class LogFeederAMSClient extends AbstractTimelineMetricsSink {
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return false;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return 0;
+  }
+
+  @Override
   protected boolean emitMetrics(TimelineMetrics metrics) {
     return super.emitMetrics(metrics);
   }
@@ -103,4 +113,4 @@ public class LogFeederAMSClient extends AbstractTimelineMetricsSink {
     return collectorPort;
   }
 
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-assembly/pom.xml b/ambari-metrics/ambari-metrics-assembly/pom.xml
index a4b87de..6b81de5 100644
--- a/ambari-metrics/ambari-metrics-assembly/pom.xml
+++ b/ambari-metrics/ambari-metrics-assembly/pom.xml
@@ -35,6 +35,7 @@
   <properties>
     <collector.dir>${project.basedir}/../ambari-metrics-timelineservice</collector.dir>
     <monitor.dir>${project.basedir}/../ambari-metrics-host-monitoring</monitor.dir>
+    <aggregator.dir>${project.basedir}/../ambari-metrics-host-aggregator</aggregator.dir>
     <grafana.dir>${project.basedir}/../ambari-metrics-grafana</grafana.dir>
     <hadoop-sink.dir>${project.basedir}/../ambari-metrics-hadoop-sink</hadoop-sink.dir>
     <storm-sink.dir>${project.basedir}/../ambari-metrics-storm-sink</storm-sink.dir>
@@ -599,6 +600,19 @@
                       </sources>
                     </mapping>
                     <mapping>
+                      <directory>/var/lib/ambari-metrics-monitor/lib</directory>
+                      <sources>
+                        <source>
+                          <location>
+                            ${aggregator.dir}/target/
+                          </location>
+                          <includes>
+                            <include>ambari-metrics-host-aggregator-${project.version}.jar</include>
+                          </includes>
+                        </source>
+                      </sources>
+                    </mapping>
+                    <mapping>
                       <directory>/etc/ambari-metrics-monitor/conf</directory>
                       <configuration>true</configuration>
                     </mapping>
@@ -744,6 +758,7 @@
                     <path>/var/run/ambari-metrics-grafana</path>
                     <path>/var/log/ambari-metrics-grafana</path>
                     <path>/var/lib/ambari-metrics-collector</path>
+                    <path>/var/lib/ambari-metrics-monitor/lib</path>
                     <path>/var/lib/ambari-metrics-grafana</path>
                     <path>/usr/lib/ambari-metrics-hadoop-sink</path>
                     <path>/usr/lib/ambari-metrics-kafka-sink</path>
@@ -1331,6 +1346,11 @@
       <type>pom</type>
       <optional>true</optional>
     </dependency>
+    <dependency>
+      <groupId>org.apache.ambari</groupId>
+      <artifactId>ambari-metrics-host-aggregator</artifactId>
+      <version>${project.version}</version>
+    </dependency>
   </dependencies>
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor-windows.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor-windows.xml b/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor-windows.xml
index ab309a1..d015d31 100644
--- a/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor-windows.xml
+++ b/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor-windows.xml
@@ -64,6 +64,13 @@
       </includes>
     </fileSet>
     <fileSet>
+      <directory>${aggregator.dir}/conf/windows</directory>
+      <outputDirectory>conf</outputDirectory>
+      <includes>
+        <include>log4j.properties</include>
+      </includes>
+    </fileSet>
+    <fileSet>
       <directory>${monitor.dir}/conf/windows</directory>
       <outputDirectory>/</outputDirectory>
       <includes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor.xml b/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor.xml
index 99a41c3..448fe62 100644
--- a/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor.xml
+++ b/ambari-metrics/ambari-metrics-assembly/src/main/assembly/monitor.xml
@@ -46,6 +46,13 @@
       </includes>
     </fileSet>
     <fileSet>
+      <directory>${aggregator.dir}/conf/unix</directory>
+      <outputDirectory>conf</outputDirectory>
+      <includes>
+        <include>log4j.properties</include>
+      </includes>
+    </fileSet>
+    <fileSet>
       <directory>${monitor.dir}/conf/unix</directory>
       <outputDirectory>bin</outputDirectory>
       <includes>
@@ -68,4 +75,4 @@
 
 
 
-</assembly>
\ No newline at end of file
+</assembly>

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
index 2c6fae2..a8dc571 100644
--- a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
@@ -78,6 +78,8 @@ public abstract class AbstractTimelineMetricsSink {
   public static final String SSL_KEYSTORE_PATH_PROPERTY = "truststore.path";
   public static final String SSL_KEYSTORE_TYPE_PROPERTY = "truststore.type";
   public static final String SSL_KEYSTORE_PASSWORD_PROPERTY = "truststore.password";
+  public static final String HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY = "host_in_memory_aggregation";
+  public static final String HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY = "host_in_memory_aggregation_port";
   public static final String COLLECTOR_LIVE_NODES_PATH = "/ws/v1/timeline/metrics/livenodes";
   public static final String INSTANCE_ID_PROPERTY = "instanceId";
   public static final String SET_INSTANCE_ID_PROPERTY = "set.instanceId";
@@ -241,8 +243,14 @@ public abstract class AbstractTimelineMetricsSink {
   }
 
   protected boolean emitMetrics(TimelineMetrics metrics) {
-    String collectorHost = getCurrentCollectorHost();
-    String connectUrl = getCollectorUri(collectorHost);
+    String connectUrl;
+    if (isHostInMemoryAggregationEnabled()) {
+      connectUrl = constructTimelineMetricUri("http", "localhost", String.valueOf(getHostInMemoryAggregationPort()));
+    } else {
+      String collectorHost  = getCurrentCollectorHost();
+      connectUrl = getCollectorUri(collectorHost);
+    }
+
     String jsonData = null;
     LOG.debug("EmitMetrics connectUrl = "  + connectUrl);
     try {
@@ -562,4 +570,16 @@ public abstract class AbstractTimelineMetricsSink {
    * @return String "host1"
    */
   abstract protected String getHostname();
+
+  /**
+   * Check if host in-memory aggregation is enabled
+   * @return
+   */
+  abstract protected boolean isHostInMemoryAggregationEnabled();
+
+  /**
+   * In memory aggregation port
+   * @return
+   */
+  abstract protected int getHostInMemoryAggregationPort();
 }
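
The net effect on emitMetrics is a routing decision: when host in-memory aggregation is enabled, metrics are posted to the local aggregator daemon instead of a remote collector. A standalone Java sketch of that decision, for illustration; the Properties-based lookup and the "/ws/v1/timeline/metrics" path are assumptions, only the two property names come from the constants added above:

    import java.util.Properties;

    public class MetricsRouting {
      static String connectUrl(Properties conf, String collectorUri) {
        boolean inMemory = Boolean.parseBoolean(
            conf.getProperty("host_in_memory_aggregation", "false"));
        int port = Integer.parseInt(
            conf.getProperty("host_in_memory_aggregation_port", "61888"));
        // Prefer the local aggregator when enabled; fall back to the collector URI.
        return inMemory
            ? "http://localhost:" + port + "/ws/v1/timeline/metrics"  // assumed path
            : collectorUri;
      }
    }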

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AggregationResult.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AggregationResult.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AggregationResult.java
new file mode 100644
index 0000000..c903e3d
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AggregationResult.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.Set;
+
+@XmlRootElement(name="AggregationResult")
+public class AggregationResult {
+    protected Set<TimelineMetricWithAggregatedValues> result;
+    protected Long timeInMilis;
+
+    @Override
+    public String toString() {
+        return "AggregationResult{" +
+                "result=" + result +
+                ", timeInMilis=" + timeInMilis +
+                '}';
+    }
+
+    public AggregationResult() {
+    }
+
+    public AggregationResult(Set<TimelineMetricWithAggregatedValues> result, Long timeInMilis) {
+        this.result = result;
+        this.timeInMilis = timeInMilis;
+    }
+    @XmlElement
+    public Set<TimelineMetricWithAggregatedValues> getResult() {
+        return result;
+    }
+
+    public void setResult(Set<TimelineMetricWithAggregatedValues> result) {
+        this.result = result;
+    }
+    @XmlElement
+    public Long getTimeInMilis() {
+        return timeInMilis;
+    }
+
+    public void setTimeInMilis(Long timeInMilis) {
+        this.timeInMilis = timeInMilis;
+    }
+}
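
A trivial construction sketch for the new POJO (illustration only); a real payload would carry TimelineMetricWithAggregatedValues entries, a class added elsewhere in this commit:

    import java.util.Collections;

    import org.apache.hadoop.metrics2.sink.timeline.AggregationResult;

    public class AggregationResultDemo {
      public static void main(String[] args) {
        AggregationResult empty = new AggregationResult(
            Collections.emptySet(), System.currentTimeMillis());
        System.out.println(empty);  // AggregationResult{result=[], timeInMilis=...}
      }
    }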

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricAggregate.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricAggregate.java
new file mode 100644
index 0000000..84cba0e
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricAggregate.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.annotate.JsonSubTypes;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import java.io.IOException;
+
+/**
+*
+*/
+@JsonSubTypes({@JsonSubTypes.Type(value = MetricClusterAggregate.class),
+  @JsonSubTypes.Type(value = MetricHostAggregate.class)})
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class MetricAggregate {
+  private static final ObjectMapper mapper = new ObjectMapper();
+
+  protected Double sum = 0.0;
+  protected Double deviation;
+  protected Double max = Double.MIN_VALUE;
+  protected Double min = Double.MAX_VALUE;
+
+  public MetricAggregate() {
+  }
+
+  MetricAggregate(Double sum, Double deviation, Double max,
+                  Double min) {
+    this.sum = sum;
+    this.deviation = deviation;
+    this.max = max;
+    this.min = min;
+  }
+
+  public void updateSum(Double sum) {
+    this.sum += sum;
+  }
+
+  public void updateMax(Double max) {
+    if (max > this.max) {
+      this.max = max;
+    }
+  }
+
+  public void updateMin(Double min) {
+    if (min < this.min) {
+      this.min = min;
+    }
+  }
+
+  @JsonProperty("sum")
+  public Double getSum() {
+    return sum;
+  }
+
+  @JsonProperty("deviation")
+  public Double getDeviation() {
+    return deviation;
+  }
+
+  @JsonProperty("max")
+  public Double getMax() {
+    return max;
+  }
+
+  @JsonProperty("min")
+  public Double getMin() {
+    return min;
+  }
+
+  public void setSum(Double sum) {
+    this.sum = sum;
+  }
+
+  public void setDeviation(Double deviation) {
+    this.deviation = deviation;
+  }
+
+  public void setMax(Double max) {
+    this.max = max;
+  }
+
+  public void setMin(Double min) {
+    this.min = min;
+  }
+
+  public String toJSON() throws IOException {
+    return mapper.writeValueAsString(this);
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricClusterAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricClusterAggregate.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricClusterAggregate.java
new file mode 100644
index 0000000..7ef2c1d
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricClusterAggregate.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+
+import org.codehaus.jackson.annotate.JsonCreator;
+import org.codehaus.jackson.annotate.JsonProperty;
+
+/**
+*
+*/
+public class MetricClusterAggregate extends MetricAggregate {
+  private int numberOfHosts;
+
+  @JsonCreator
+  public MetricClusterAggregate() {
+  }
+
+  public MetricClusterAggregate(Double sum, int numberOfHosts, Double deviation,
+                         Double max, Double min) {
+    super(sum, deviation, max, min);
+    this.numberOfHosts = numberOfHosts;
+  }
+
+  @JsonProperty("numberOfHosts")
+  public int getNumberOfHosts() {
+    return numberOfHosts;
+  }
+
+  public void updateNumberOfHosts(int count) {
+    this.numberOfHosts += count;
+  }
+
+  public void setNumberOfHosts(int numberOfHosts) {
+    this.numberOfHosts = numberOfHosts;
+  }
+
+  /**
+   * Find and update min, max and avg for a minute
+   */
+  public void updateAggregates(MetricClusterAggregate hostAggregate) {
+    updateMax(hostAggregate.getMax());
+    updateMin(hostAggregate.getMin());
+    updateSum(hostAggregate.getSum());
+    updateNumberOfHosts(hostAggregate.getNumberOfHosts());
+  }
+
+  @Override
+  public String toString() {
+    return "MetricAggregate{" +
+      "sum=" + sum +
+      ", numberOfHosts=" + numberOfHosts +
+      ", deviation=" + deviation +
+      ", max=" + max +
+      ", min=" + min +
+      '}';
+  }
+}
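
For illustration, a hypothetical snippet (not part of this commit) showing how
two cluster aggregates merge; updateAggregates() folds the second aggregate's
sum, min, max and host count into the first:

    MetricClusterAggregate a = new MetricClusterAggregate(4.0, 2, 0.0, 3.0, 1.0);
    MetricClusterAggregate b = new MetricClusterAggregate(6.0, 3, 0.0, 4.0, 2.0);
    a.updateAggregates(b);
    // a now holds sum=10.0, numberOfHosts=5, max=4.0, min=1.0
    String json = a.toJSON();  // serialized via the mapper in MetricAggregate (throws IOException)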

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricHostAggregate.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricHostAggregate.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricHostAggregate.java
new file mode 100644
index 0000000..e190913
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/MetricHostAggregate.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+
+import org.codehaus.jackson.annotate.JsonCreator;
+import org.codehaus.jackson.annotate.JsonProperty;
+
+/**
+ * Represents a minute-based aggregation of metric values, used to build
+ * resolutions greater than a minute.
+ */
+public class MetricHostAggregate extends MetricAggregate {
+
+  private long numberOfSamples = 0;
+
+  @JsonCreator
+  public MetricHostAggregate() {
+    super(0.0, 0.0, -Double.MAX_VALUE, Double.MAX_VALUE);
+  }
+
+  public MetricHostAggregate(Double sum, int numberOfSamples,
+                             Double deviation,
+                             Double max, Double min) {
+    super(sum, deviation, max, min);
+    this.numberOfSamples = numberOfSamples;
+  }
+
+  @JsonProperty("numberOfSamples")
+  public long getNumberOfSamples() {
+    return numberOfSamples == 0 ? 1 : numberOfSamples;
+  }
+
+  public void updateNumberOfSamples(long count) {
+    this.numberOfSamples += count;
+  }
+
+  public void setNumberOfSamples(long numberOfSamples) {
+    this.numberOfSamples = numberOfSamples;
+  }
+
+  public double calculateAverage() {
+    // getNumberOfSamples() never returns 0, avoiding a division by zero
+    return sum / getNumberOfSamples();
+  }
+
+  /**
+   * Merge another host aggregate into this one, updating sum, min, max and
+   * the sample count.
+   */
+  public void updateAggregates(MetricHostAggregate hostAggregate) {
+    updateMax(hostAggregate.getMax());
+    updateMin(hostAggregate.getMin());
+    updateSum(hostAggregate.getSum());
+    updateNumberOfSamples(hostAggregate.getNumberOfSamples());
+  }
+
+  @Override
+  public String toString() {
+    return "MetricHostAggregate{" +
+      "sum=" + sum +
+      ", numberOfSamples=" + numberOfSamples +
+      ", deviation=" + deviation +
+      ", max=" + max +
+      ", min=" + min +
+      '}';
+  }
+}
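
A similar hypothetical sketch for host aggregates, which also track a sample
count so an average can be derived:

    MetricHostAggregate minute1 = new MetricHostAggregate(10.0, 2, 0.0, 7.0, 3.0);
    MetricHostAggregate minute2 = new MetricHostAggregate(20.0, 2, 0.0, 12.0, 8.0);
    minute1.updateAggregates(minute2);
    // minute1 now holds sum=30.0, numberOfSamples=4, max=12.0, min=3.0
    double avg = minute1.calculateAverage();  // 30.0 / 4 = 7.5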

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
index 44c9d4a..edace52 100644
--- a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetric.java
@@ -45,7 +45,7 @@ public class TimelineMetric implements Comparable<TimelineMetric> {
   private String type;
   private String units;
   private TreeMap<Long, Double> metricValues = new TreeMap<Long, Double>();
-  private Map<String, String> metadata = new HashMap<>();
+  private HashMap<String, String> metadata = new HashMap<>();
 
   // default
   public TimelineMetric() {
@@ -151,11 +151,11 @@ public class TimelineMetric implements Comparable<TimelineMetric> {
   }
 
   @XmlElement(name = "metadata")
-  public Map<String,String> getMetadata () {
+  public HashMap<String,String> getMetadata () {
     return metadata;
   }
 
-  public void setMetadata (Map<String,String> metadata) {
+  public void setMetadata (HashMap<String,String> metadata) {
     this.metadata = metadata;
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricWithAggregatedValues.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricWithAggregatedValues.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricWithAggregatedValues.java
new file mode 100644
index 0000000..626ac5f
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/TimelineMetricWithAggregatedValues.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.sink.timeline;
+
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+
+@XmlRootElement(name = "TimelineMetricWithAggregatedValues")
+@XmlAccessorType(XmlAccessType.NONE)
+public class TimelineMetricWithAggregatedValues {
+    private TimelineMetric timelineMetric;
+    private MetricHostAggregate metricAggregate;
+
+    public TimelineMetricWithAggregatedValues() {
+    }
+
+    public TimelineMetricWithAggregatedValues(TimelineMetric metric, MetricHostAggregate metricAggregate) {
+        timelineMetric = metric;
+        this.metricAggregate = metricAggregate;
+    }
+
+    @XmlElement
+    public MetricHostAggregate getMetricAggregate() {
+        return metricAggregate;
+    }
+    @XmlElement
+    public TimelineMetric getTimelineMetric() {
+        return timelineMetric;
+    }
+
+    public void setTimelineMetric(TimelineMetric timelineMetric) {
+        this.timelineMetric = timelineMetric;
+    }
+
+    public void setMetricAggregate(MetricHostAggregate metricAggregate) {
+        this.metricAggregate = metricAggregate;
+    }
+
+    @Override
+    public String toString() {
+        return "TimelineMetricWithAggregatedValues{" +
+                "timelineMetric=" + timelineMetric +
+                ", metricAggregate=" + metricAggregate +
+                '}';
+    }
+}
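
A hypothetical sketch of how the wrapper is assembled (setMetricName is
assumed from the usual TimelineMetric bean accessors; setMetricValues is used
the same way by the aggregator below): a metric whose value map has been
cleared is paired with its pre-computed aggregate:

    TimelineMetric metric = new TimelineMetric();
    metric.setMetricName("cpu_user");                  // assumed setter
    metric.setMetricValues(new TreeMap<Long, Double>());
    MetricHostAggregate aggregate = new MetricHostAggregate(30.0, 4, 0.0, 12.0, 3.0);
    TimelineMetricWithAggregatedValues payload =
        new TimelineMetricWithAggregatedValues(metric, aggregate);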

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/AbstractTimelineMetricSinkTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/AbstractTimelineMetricSinkTest.java b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/AbstractTimelineMetricSinkTest.java
index 9b0cdbe..ce2cf79 100644
--- a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/AbstractTimelineMetricSinkTest.java
+++ b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/AbstractTimelineMetricSinkTest.java
@@ -90,6 +90,16 @@ public class AbstractTimelineMetricSinkTest {
     }
 
     @Override
+    protected boolean isHostInMemoryAggregationEnabled() {
+      return true;
+    }
+
+    @Override
+    protected int getHostInMemoryAggregationPort() {
+      return 61888;
+    }
+
+    @Override
     public boolean emitMetrics(TimelineMetrics metrics) {
       super.init();
       return super.emitMetrics(metrics);

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java
index a393a96..f0174d5 100644
--- a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java
+++ b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java
@@ -192,5 +192,15 @@ public class MetricCollectorHATest {
     protected String getHostname() {
       return "h1";
     }
+
+    @Override
+    protected boolean isHostInMemoryAggregationEnabled() {
+      return true;
+    }
+
+    @Override
+    protected int getHostInMemoryAggregationPort() {
+      return 61888;
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java
index 32fe32e..4eb75eb 100644
--- a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java
+++ b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/cache/HandleConnectExceptionTest.java
@@ -125,6 +125,16 @@ public class HandleConnectExceptionTest {
     }
 
     @Override
+    protected boolean isHostInMemoryAggregationEnabled() {
+      return false;
+    }
+
+    @Override
+    protected int getHostInMemoryAggregationPort() {
+      return 61888;
+    }
+
+    @Override
     public boolean emitMetrics(TimelineMetrics metrics) {
       super.init();
       return super.emitMetrics(metrics);

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java
index 904c916..6277907 100644
--- a/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-flume-sink/src/main/java/org/apache/hadoop/metrics2/sink/flume/FlumeTimelineMetricsSink.java
@@ -63,6 +63,9 @@ public class FlumeTimelineMetricsSink extends AbstractTimelineMetricsSink implem
   private int timeoutSeconds = 10;
   private boolean setInstanceId;
   private String instanceId;
+  private boolean hostInMemoryAggregationEnabled;
+  private int hostInMemoryAggregationPort;
+
 
   @Override
   public void start() {
@@ -110,6 +113,9 @@ public class FlumeTimelineMetricsSink extends AbstractTimelineMetricsSink implem
     port = configuration.getProperty(COLLECTOR_PORT, "6188");
     setInstanceId = Boolean.valueOf(configuration.getProperty(SET_INSTANCE_ID_PROPERTY, "false"));
     instanceId = configuration.getProperty(INSTANCE_ID_PROPERTY, "");
+
+    hostInMemoryAggregationEnabled = Boolean.parseBoolean(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY, "false"));
+    hostInMemoryAggregationPort = Integer.parseInt(configuration.getProperty(HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY, "61888"));
     // Initialize the collector write strategy
     super.init();
 
@@ -162,6 +168,16 @@ public class FlumeTimelineMetricsSink extends AbstractTimelineMetricsSink implem
     return hostname;
   }
 
+  @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
   public void setPollFrequency(long pollFrequency) {
     this.pollFrequency = pollFrequency;
   }
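
One subtlety worth noting with flag properties (illustrative snippet, property
name made up): Boolean.getBoolean(s) looks up a JVM system property named s
rather than parsing s, so configuration values must go through
Boolean.parseBoolean:

    java.util.Properties p = new java.util.Properties();
    p.setProperty("aggregation.enabled", "true");
    boolean wrong = Boolean.getBoolean(p.getProperty("aggregation.enabled"));   // looks up a system property named "true" -> false
    boolean right = Boolean.parseBoolean(p.getProperty("aggregation.enabled")); // true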

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
index 11e16c2..c235c7c 100644
--- a/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-hadoop-sink/src/main/java/org/apache/hadoop/metrics2/sink/timeline/HadoopTimelineMetricsSink.java
@@ -75,6 +75,8 @@ public class HadoopTimelineMetricsSink extends AbstractTimelineMetricsSink imple
       return t;
     }
   });
+  private int hostInMemoryAggregationPort;
+  private boolean hostInMemoryAggregationEnabled;
 
   @Override
   public void init(SubsetConfiguration conf) {
@@ -107,7 +109,8 @@ public class HadoopTimelineMetricsSink extends AbstractTimelineMetricsSink imple
     protocol = conf.getString(COLLECTOR_PROTOCOL, "http");
     collectorHosts = parseHostsStringArrayIntoCollection(conf.getStringArray(COLLECTOR_HOSTS_PROPERTY));
     port = conf.getString(COLLECTOR_PORT, "6188");
-
+    hostInMemoryAggregationEnabled = conf.getBoolean(HOST_IN_MEMORY_AGGREGATION_ENABLED_PROPERTY, false);
+    hostInMemoryAggregationPort = conf.getInt(HOST_IN_MEMORY_AGGREGATION_PORT_PROPERTY, 61888);
     if (collectorHosts.isEmpty()) {
       LOG.error("No Metric collector configured.");
     } else {
@@ -249,6 +252,16 @@ public class HadoopTimelineMetricsSink extends AbstractTimelineMetricsSink imple
   }
 
   @Override
+  protected boolean isHostInMemoryAggregationEnabled() {
+    return hostInMemoryAggregationEnabled;
+  }
+
+  @Override
+  protected int getHostInMemoryAggregationPort() {
+    return hostInMemoryAggregationPort;
+  }
+
+  @Override
   public void putMetrics(MetricsRecord record) {
     try {
       String recordName = record.name();
@@ -308,9 +321,10 @@ public class HadoopTimelineMetricsSink extends AbstractTimelineMetricsSink imple
 
       int sbBaseLen = sb.length();
       List<TimelineMetric> metricList = new ArrayList<TimelineMetric>();
-      Map<String, String> metadata = null;
+      HashMap<String, String> metadata = null;
       if (skipAggregation) {
-        metadata = Collections.singletonMap("skipAggregation", "true");
+        metadata = new HashMap<>();
+        metadata.put("skipAggregation", "true");
       }
       long startTime = record.timestamp();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-aggregator/conf/unix/log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/conf/unix/log4j.properties b/ambari-metrics/ambari-metrics-host-aggregator/conf/unix/log4j.properties
new file mode 100644
index 0000000..d7ceedd
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/conf/unix/log4j.properties
@@ -0,0 +1,31 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Define some default values that can be overridden by system properties
+# Root logger option
+log4j.rootLogger=INFO,file
+
+# Direct log messages to a log file
+log4j.appender.file=org.apache.log4j.RollingFileAppender
+log4j.appender.file.File=/var/log/ambari-metrics-monitor/ambari-metrics-aggregator.log
+log4j.appender.file.MaxFileSize=80MB
+log4j.appender.file.MaxBackupIndex=60
+log4j.appender.file.layout=org.apache.log4j.PatternLayout
+log4j.appender.file.layout.ConversionPattern=%d{ABSOLUTE} %5p [%t] %c{1}:%L - %m%n
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-aggregator/conf/windows/log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/conf/windows/log4j.properties b/ambari-metrics/ambari-metrics-host-aggregator/conf/windows/log4j.properties
new file mode 100644
index 0000000..d9aabab
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/conf/windows/log4j.properties
@@ -0,0 +1,29 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Define some default values that can be overridden by system properties
+# Root logger option
+log4j.rootLogger=INFO,file
+
+# Direct log messages to a log file
+log4j.appender.file=org.apache.log4j.RollingFileAppender
+log4j.appender.file.File=\\var\\log\\ambari-metrics-monitor\\ambari-metrics-aggregator.log
+log4j.appender.file.MaxFileSize=80MB
+log4j.appender.file.MaxBackupIndex=60
+log4j.appender.file.layout=org.apache.log4j.PatternLayout
+log4j.appender.file.layout.ConversionPattern=%d{ABSOLUTE} %5p [%t] %c{1}:%L - %m%n

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-aggregator/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/pom.xml b/ambari-metrics/ambari-metrics-host-aggregator/pom.xml
new file mode 100644
index 0000000..c2c7897
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/pom.xml
@@ -0,0 +1,120 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>ambari-metrics</artifactId>
+        <groupId>org.apache.ambari</groupId>
+        <version>2.0.0.0-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>ambari-metrics-host-aggregator</artifactId>
+    <packaging>jar</packaging>
+
+    <name>ambari-metrics-host-aggregator</name>
+    <url>http://maven.apache.org</url>
+
+    <properties>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>3.8.1</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+            <version>14.0.1</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.ambari</groupId>
+            <artifactId>ambari-metrics-common</artifactId>
+            <version>2.0.0.0-SNAPSHOT</version>
+        </dependency>
+        <dependency>
+            <groupId>javax.servlet</groupId>
+            <artifactId>servlet-api</artifactId>
+            <version>2.5</version>
+        </dependency>
+        <dependency>
+            <groupId>com.sun.jersey</groupId>
+            <artifactId>jersey-json</artifactId>
+            <version>1.11</version>
+        </dependency>
+        <dependency>
+            <groupId>com.sun.jersey</groupId>
+            <artifactId>jersey-server</artifactId>
+            <version>1.11</version>
+        </dependency>
+        <dependency>
+            <groupId>javax.xml.bind</groupId>
+            <artifactId>jaxb-api</artifactId>
+            <version>2.2.2</version>
+        </dependency>
+        <dependency>
+            <groupId>com.sun.jersey</groupId>
+            <artifactId>jersey-core</artifactId>
+            <version>1.11</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-common</artifactId>
+            <version>2.7.1.2.3.4.0-3347</version>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <version>1.6</version>
+                <configuration>
+                    <createDependencyReducedPom>true</createDependencyReducedPom>
+                    <filters>
+                        <filter>
+                            <artifact>*:*</artifact>
+                            <excludes>
+                                <exclude>META-INF/*.SF</exclude>
+                                <exclude>META-INF/*.DSA</exclude>
+                                <exclude>META-INF/*.RSA</exclude>
+                            </excludes>
+                        </filter>
+                    </filters>
+                </configuration>
+
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+
+</project>

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AbstractMetricPublisherThread.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AbstractMetricPublisherThread.java b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AbstractMetricPublisherThread.java
new file mode 100644
index 0000000..b1f60fa
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AbstractMetricPublisherThread.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.host.aggregator;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+import org.codehaus.jackson.map.AnnotationIntrospector;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+import org.codehaus.jackson.xc.JaxbAnnotationIntrospector;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.util.Map;
+
+/**
+ * Base class for threads that publish metrics data to the AMS collector at a fixed interval.
+ */
+public abstract class AbstractMetricPublisherThread extends Thread {
+    protected int publishIntervalInSeconds;
+    protected String publishURL;
+    protected ObjectMapper objectMapper;
+    private Log LOG;
+    protected TimelineMetricsHolder timelineMetricsHolder;
+
+    public AbstractMetricPublisherThread(TimelineMetricsHolder timelineMetricsHolder, String publishURL, int publishIntervalInSeconds) {
+        LOG = LogFactory.getLog(this.getClass());
+        this.publishURL = publishURL;
+        this.publishIntervalInSeconds = publishIntervalInSeconds;
+        this.timelineMetricsHolder = timelineMetricsHolder;
+        objectMapper = new ObjectMapper();
+        AnnotationIntrospector introspector = new JaxbAnnotationIntrospector();
+        objectMapper.setAnnotationIntrospector(introspector);
+        // skip null fields when serializing metrics payloads
+        objectMapper.setSerializationInclusion(JsonSerialize.Inclusion.NON_NULL);
+    }
+
+    /**
+     * Publishes metrics to the collector at the configured interval until interrupted.
+     */
+    @Override
+    public void run() {
+        while (!isInterrupted()) {
+            try {
+                sleep(this.publishIntervalInSeconds * 1000L);
+            } catch (InterruptedException e) {
+                // restore the interrupt status so the loop exits after a last publish attempt
+                interrupt();
+            }
+            try {
+                processAndPublishMetrics(getMetricsFromCache());
+            } catch (Exception e) {
+                LOG.error("Couldn't process and send metrics : ", e);
+            }
+        }
+    }
+
+    /**
+     * Processes and sends metrics to the collector.
+     * @param metricsFromCache snapshot of the cached metrics, keyed by capture timestamp
+     * @throws Exception if processing or publishing fails
+     */
+    protected void processAndPublishMetrics(Map<Long, TimelineMetrics> metricsFromCache) throws Exception {
+        if (metricsFromCache.isEmpty()) {
+            return;
+        }
+
+        LOG.info(String.format("Preparing %d timeline metrics for publishing", metricsFromCache.size()));
+        publishMetricsJson(processMetrics(metricsFromCache));
+    }
+
+    /**
+     * Returns the metrics to publish; the source depends on the implementation.
+     * @return map of capture timestamp to metrics
+     */
+    protected abstract Map<Long, TimelineMetrics> getMetricsFromCache();
+
+    /**
+     * Processes the given metrics (aggregates or merges them) and converts them
+     * into the JSON string that will be sent to the collector.
+     * @param metricValues map of capture timestamp to metrics
+     * @return JSON payload for the collector
+     */
+    protected abstract String processMetrics(Map<Long, TimelineMetrics> metricValues);
+
+    protected void publishMetricsJson(String jsonData) throws Exception {
+        int timeout = 5 * 1000;
+        HttpURLConnection connection = null;
+        if (this.publishURL == null) {
+            throw new IOException("Unknown URL. Unable to connect to metrics collector.");
+        }
+        LOG.info("Collector URL : " + publishURL);
+        connection = (HttpURLConnection) new URL(this.publishURL).openConnection();
+
+        connection.setRequestMethod("POST");
+        connection.setRequestProperty("Content-Type", "application/json");
+        connection.setRequestProperty("Connection", "Keep-Alive");
+        connection.setConnectTimeout(timeout);
+        connection.setReadTimeout(timeout);
+        connection.setDoOutput(true);
+
+        if (jsonData != null) {
+            try (OutputStream os = connection.getOutputStream()) {
+                os.write(jsonData.getBytes("UTF-8"));
+            }
+        }
+        int responseCode = connection.getResponseCode();
+        if (responseCode != 200) {
+            throw new Exception("responseCode is " + responseCode);
+        }
+        LOG.info("Successfully sent metrics.");
+    }
+
+    /**
+     * Interrupts the thread.
+     */
+    protected void stopPublisher() {
+        this.interrupt();
+    }
+}
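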

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatedMetricsPublisher.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatedMetricsPublisher.java b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatedMetricsPublisher.java
new file mode 100644
index 0000000..0540ec9
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatedMetricsPublisher.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.host.aggregator;
+
+
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.sink.timeline.AggregationResult;
+import org.apache.hadoop.metrics2.sink.timeline.MetricHostAggregate;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetricWithAggregatedValues;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+/**
+ * Thread that aggregates metrics and publishes them to the collector at a fixed interval.
+ */
+public class AggregatedMetricsPublisher extends AbstractMetricPublisherThread {
+
+    private Log LOG;
+
+    public AggregatedMetricsPublisher(TimelineMetricsHolder timelineMetricsHolder, String collectorURL, int interval) {
+        super(timelineMetricsHolder, collectorURL, interval);
+        LOG = LogFactory.getLog(this.getClass());
+    }
+
+    /**
+     * Gets the metrics map from the {@link TimelineMetricsHolder} aggregation cache.
+     * @return map of capture timestamp to metrics
+     */
+    @Override
+    protected Map<Long, TimelineMetrics> getMetricsFromCache() {
+        return timelineMetricsHolder.extractMetricsForAggregationPublishing();
+    }
+
+    /**
+     * Aggregates the given metrics per metric name and converts them into the
+     * JSON string that will be sent to the collector.
+     * @param metricForAggregationValues map of capture timestamp to metrics
+     * @return JSON payload for the collector
+     */
+    @Override
+    protected String processMetrics(Map<Long, TimelineMetrics> metricForAggregationValues) {
+        HashMap<String, TimelineMetrics> nameToMetricMap = new HashMap<>();
+        for (TimelineMetrics timelineMetrics : metricForAggregationValues.values()) {
+            for (TimelineMetric timelineMetric : timelineMetrics.getMetrics()) {
+                if (!nameToMetricMap.containsKey(timelineMetric.getMetricName())) {
+                    nameToMetricMap.put(timelineMetric.getMetricName(), new TimelineMetrics());
+                }
+                nameToMetricMap.get(timelineMetric.getMetricName()).addOrMergeTimelineMetric(timelineMetric);
+            }
+        }
+        Set<TimelineMetricWithAggregatedValues> metricAggregateMap = new HashSet<>();
+        for (TimelineMetrics metrics : nameToMetricMap.values()) {
+            double sum = 0;
+            double max = -Double.MAX_VALUE;
+            double min = Double.MAX_VALUE;
+            int count = 0;
+            for (TimelineMetric metric : metrics.getMetrics()) {
+                for (Double value : metric.getMetricValues().values()) {
+                    sum+=value;
+                    max = Math.max(max, value);
+                    min = Math.min(min, value);
+                    count++;
+                }
+            }
+            TimelineMetric tmpMetric = new TimelineMetric(metrics.getMetrics().get(0));
+            tmpMetric.setMetricValues(new TreeMap<Long, Double>());
+            metricAggregateMap.add(new TimelineMetricWithAggregatedValues(tmpMetric, new MetricHostAggregate(sum, count, 0d, max, min)));
+        }
+        String json = null;
+        try {
+            json = objectMapper.writeValueAsString(new AggregationResult(metricAggregateMap, System.currentTimeMillis()));
+            LOG.debug(json);
+        } catch (Exception e) {
+            LOG.error("Failed to convert result into json", e);
+        }
+
+        return json;
+    }
+}
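
As a worked example of the loop above (not part of the commit): if one metric
name collected the values 3, 7, 8 and 12 across the interval, the published
aggregate is:

    // sum = 3 + 7 + 8 + 12 = 30.0
    // count = 4, max = 12.0, min = 3.0
    new MetricHostAggregate(30.0, 4, 0d, 12.0, 3.0);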

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorApplication.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorApplication.java b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorApplication.java
new file mode 100644
index 0000000..c6b703b
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorApplication.java
@@ -0,0 +1,180 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.host.aggregator;
+
+import com.sun.jersey.api.container.httpserver.HttpServerFactory;
+import com.sun.jersey.api.core.PackagesResourceConfig;
+import com.sun.jersey.api.core.ResourceConfig;
+import com.sun.net.httpserver.HttpServer;
+
+import javax.ws.rs.core.UriBuilder;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.URI;
+import java.net.URL;
+import java.net.UnknownHostException;
+import java.util.HashMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Web application with two publisher threads that process received metrics and submit the results to the collector.
+ */
+public class AggregatorApplication {
+    private static final int STOP_SECONDS_DELAY = 0;
+    private static final int JOIN_SECONDS_TIMEOUT = 2;
+    private static final String BASE_POST_URL = "%s://%s:%s/ws/v1/timeline/metrics";
+    private static final String AGGREGATED_POST_PREFIX = "/aggregated";
+    private static final String METRICS_SITE_CONFIGURATION_FILE = "ams-site.xml";
+    private static final Log LOG = LogFactory.getLog(AggregatorApplication.class);
+    private final int webApplicationPort;
+    private final int rawPublishingInterval;
+    private final int aggregationInterval;
+    private Configuration configuration;
+    private String [] collectorHosts;
+    private AggregatedMetricsPublisher aggregatePublisher;
+    private RawMetricsPublisher rawPublisher;
+    private TimelineMetricsHolder timelineMetricsHolder;
+    private HttpServer httpServer;
+
+    public AggregatorApplication(String collectorHosts) {
+        initConfiguration();
+        this.collectorHosts = collectorHosts.split(",");
+        this.aggregationInterval = configuration.getInt("timeline.metrics.host.aggregator.minute.interval", 300);
+        this.rawPublishingInterval = configuration.getInt("timeline.metrics.sink.report.interval", 60);
+        this.webApplicationPort = configuration.getInt("timeline.metrics.host.inmemory.aggregation.port", 61888);
+        this.timelineMetricsHolder = TimelineMetricsHolder.getInstance(rawPublishingInterval, aggregationInterval);
+        try {
+            this.httpServer = createHttpServer();
+        } catch (IOException e) {
+            LOG.error("Exception while starting HTTP server. Exiting", e);
+            System.exit(1);
+        }
+    }
+
+    private void initConfiguration() {
+        ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+        if (classLoader == null) {
+            classLoader = getClass().getClassLoader();
+        }
+
+        URL amsResUrl = classLoader.getResource(METRICS_SITE_CONFIGURATION_FILE);
+        LOG.info("Found metric service configuration: " + amsResUrl);
+        if (amsResUrl == null) {
+            throw new IllegalStateException("Unable to initialize the metrics " +
+                    "subsystem. No ams-site present in the classpath.");
+        }
+        configuration = new Configuration(true);
+        try {
+            configuration.addResource(amsResUrl.toURI().toURL());
+        } catch (Exception e) {
+            LOG.error("Couldn't init configuration. ", e);
+            System.exit(1);
+        }
+    }
+
+    private String getHostName() {
+        String hostName = "localhost";
+        try {
+            hostName = InetAddress.getLocalHost().getCanonicalHostName();
+        } catch (UnknownHostException e) {
+            LOG.error(e);
+        }
+        return hostName;
+    }
+
+    private URI getURI() {
+        URI uri = UriBuilder.fromUri("http://" + getHostName() + "/").port(this.webApplicationPort).build();
+        LOG.info(String.format("Web server at %s", uri));
+        return uri;
+    }
+
+    private HttpServer createHttpServer() throws IOException {
+        ResourceConfig resourceConfig = new PackagesResourceConfig("org.apache.hadoop.metrics2.host.aggregator");
+        HashMap<String, Object> params = new HashMap<>();
+        params.put("com.sun.jersey.api.json.POJOMappingFeature", "true");
+        resourceConfig.setPropertiesAndFeatures(params);
+        return HttpServerFactory.create(getURI(), resourceConfig);
+    }
+
+    private void startWebServer() {
+        LOG.info("Starting web server.");
+        this.httpServer.start();
+    }
+
+    private void startAggregatePublisherThread() {
+        LOG.info("Starting aggregated metrics publisher.");
+        String collectorURL = buildBasicCollectorURL(collectorHosts[0]) + AGGREGATED_POST_PREFIX;
+        aggregatePublisher = new AggregatedMetricsPublisher(timelineMetricsHolder, collectorURL, aggregationInterval);
+        aggregatePublisher.start();
+    }
+
+    private void startRawPublisherThread() {
+        LOG.info("Starting raw metrics publisher.");
+        String collectorURL = buildBasicCollectorURL(collectorHosts[0]);
+        rawPublisher = new RawMetricsPublisher(timelineMetricsHolder, collectorURL, rawPublishingInterval);
+        rawPublisher.start();
+    }
+
+    private void stop() {
+        aggregatePublisher.stopPublisher();
+        rawPublisher.stopPublisher();
+        httpServer.stop(STOP_SECONDS_DELAY);
+        LOG.info("Stopped web server.");
+        try {
+            LOG.info("Waiting for threads to join.");
+            aggregatePublisher.join(JOIN_SECONDS_TIMEOUT * 1000);
+            rawPublisher.join(JOIN_SECONDS_TIMEOUT * 1000);
+            LOG.info("Gracefully stopped Aggregator Application.");
+        } catch (InterruptedException e) {
+            LOG.error("Received exception during stop : ", e);
+        }
+    }
+
+    private String buildBasicCollectorURL(String host) {
+        String port = configuration.get("timeline.metrics.service.webapp.address", "0.0.0.0:6188").split(":")[1];
+        String protocol = configuration.get("timeline.metrics.service.http.policy", "HTTP_ONLY").equalsIgnoreCase("HTTP_ONLY") ? "http" : "https";
+        return String.format(BASE_POST_URL, protocol, host, port);
+    }
+
+    public static void main( String[] args ) throws Exception {
+        LOG.info("Starting aggregator application");
+        if (args.length != 1) {
+            throw new Exception("This jar should be run with one argument: a comma-separated list of collector hosts");
+        }
+
+        final AggregatorApplication app = new AggregatorApplication(args[0]);
+        app.startAggregatePublisherThread();
+        app.startRawPublisherThread();
+        app.startWebServer();
+
+        Runtime.getRuntime().addShutdownHook(new Thread() {
+            public void run() {
+                LOG.info("Stopping aggregator application");
+                app.stop();
+            }
+        });
+    }
+}
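
For reference, the host monitor (aggregator.py later in this commit) launches
this entry point with a command along these lines, where the JVM arguments,
classpath and hosts shown here are illustrative:

    java <jvm_args> -cp '/var/lib/ambari-metrics-monitor/lib/*:<config_dir>' \
      org.apache.hadoop.metrics2.host.aggregator.AggregatorApplication host1,host2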

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorWebService.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorWebService.java b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorWebService.java
new file mode 100644
index 0000000..f96d0ed
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/AggregatorWebService.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.host.aggregator;
+
+
+
+import com.sun.jersey.spi.resource.Singleton;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.io.IOException;
+
+@Singleton
+@Path("/ws/v1/timeline")
+public class AggregatorWebService {
+    TimelineMetricsHolder metricsHolder = TimelineMetricsHolder.getInstance();
+
+    // Liveness check; the host monitor's AggregatorWatchdog polls this endpoint.
+    @GET
+    @Produces("text/json")
+    @Path("/metrics")
+    public Response helloWorld() throws IOException {
+        return Response.ok().build();
+    }
+
+    @POST
+    @Produces(MediaType.TEXT_PLAIN)
+    @Consumes({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
+    @Path("/metrics")
+    public Response postMetrics(
+            TimelineMetrics metrics) {
+        metricsHolder.putMetricsForAggregationPublishing(metrics);
+        metricsHolder.putMetricsForRawPublishing(metrics);
+        return Response.ok().build();
+    }
+}
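
A hypothetical client-side sketch of hitting this endpoint (assumes java.net
and java.io imports; the 61888 port is the default from ams-site, and the
top-level "metrics" field is assumed from the TimelineMetrics bean):

    URL url = new URL("http://localhost:61888/ws/v1/timeline/metrics");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("POST");
    conn.setRequestProperty("Content-Type", "application/json");
    conn.setDoOutput(true);
    try (OutputStream os = conn.getOutputStream()) {
        os.write("{\"metrics\":[]}".getBytes("UTF-8"));
    }
    int rc = conn.getResponseCode();  // 200 when the payload was accepted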

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/RawMetricsPublisher.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/RawMetricsPublisher.java b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/RawMetricsPublisher.java
new file mode 100644
index 0000000..f317ed9
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/RawMetricsPublisher.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.host.aggregator;
+
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetric;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+
+import java.util.Map;
+
+public class RawMetricsPublisher extends AbstractMetricPublisherThread {
+    private final Log LOG;
+
+    public RawMetricsPublisher(TimelineMetricsHolder timelineMetricsHolder, String collectorURL, int interval) {
+        super(timelineMetricsHolder, collectorURL, interval);
+        LOG = LogFactory.getLog(this.getClass());
+    }
+
+
+    @Override
+    protected Map<Long, TimelineMetrics> getMetricsFromCache() {
+        return timelineMetricsHolder.extractMetricsForRawPublishing();
+    }
+
+    @Override
+    protected String processMetrics(Map<Long, TimelineMetrics> metricValues) {
+        //merge everything in one TimelineMetrics object
+        TimelineMetrics timelineMetrics = new TimelineMetrics();
+        for (TimelineMetrics metrics : metricValues.values()) {
+            for (TimelineMetric timelineMetric : metrics.getMetrics())
+                timelineMetrics.addOrMergeTimelineMetric(timelineMetric);
+        }
+        //map TimelineMetrics to json string
+        String json = null;
+        try {
+            json = objectMapper.writeValueAsString(timelineMetrics);
+            LOG.debug(json);
+        } catch (Exception e) {
+            LOG.error("Failed to convert result into json", e);
+        }
+        return json;
+    }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/TimelineMetricsHolder.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/TimelineMetricsHolder.java b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/TimelineMetricsHolder.java
new file mode 100644
index 0000000..b355c97
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-aggregator/src/main/java/org/apache/hadoop/metrics2/host/aggregator/TimelineMetricsHolder.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.host.aggregator;
+
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import org.apache.hadoop.metrics2.sink.timeline.TimelineMetrics;
+
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ * Singleton holding two Guava caches, one for raw and one for aggregated metrics.
+ */
+public class TimelineMetricsHolder {
+    private static final int DEFAULT_RAW_CACHE_EXPIRE_TIME = 60;
+    private static final int DEFAULT_AGGREGATION_CACHE_EXPIRE_TIME = 300;
+    private Cache<Long, TimelineMetrics> aggregationMetricsCache;
+    private Cache<Long, TimelineMetrics> rawMetricsCache;
+    private static TimelineMetricsHolder instance = null;
+    //to ensure no metric values are expired
+    private static final int EXPIRE_DELAY = 30;
+    ReadWriteLock aggregationCacheLock = new ReentrantReadWriteLock();
+    ReadWriteLock rawCacheLock = new ReentrantReadWriteLock();
+
+    private TimelineMetricsHolder(int rawCacheExpireTime, int aggregationCacheExpireTime) {
+        this.rawMetricsCache = CacheBuilder.newBuilder().expireAfterWrite(rawCacheExpireTime + EXPIRE_DELAY, TimeUnit.SECONDS).build();
+        this.aggregationMetricsCache = CacheBuilder.newBuilder().expireAfterWrite(aggregationCacheExpireTime + EXPIRE_DELAY, TimeUnit.SECONDS).build();
+    }
+
+    public static synchronized TimelineMetricsHolder getInstance(int rawCacheExpireTime, int aggregationCacheExpireTime) {
+        if (instance == null) {
+            instance = new TimelineMetricsHolder(rawCacheExpireTime, aggregationCacheExpireTime);
+        }
+        return instance;
+    }
+
+    /**
+     * Returns the singleton, using the default expiration times if the caches
+     * have not been initialized yet.
+     * @return the singleton instance
+     */
+    public static TimelineMetricsHolder getInstance() {
+        return getInstance(DEFAULT_RAW_CACHE_EXPIRE_TIME, DEFAULT_AGGREGATION_CACHE_EXPIRE_TIME);
+    }
+
+    public void putMetricsForAggregationPublishing(TimelineMetrics timelineMetrics) {
+        aggregationCacheLock.writeLock().lock();
+        try {
+            aggregationMetricsCache.put(System.currentTimeMillis(), timelineMetrics);
+        } finally {
+            aggregationCacheLock.writeLock().unlock();
+        }
+    }
+
+    public Map<Long, TimelineMetrics> extractMetricsForAggregationPublishing() {
+        return extractMetricsFromCacheWithLock(aggregationMetricsCache, aggregationCacheLock);
+    }
+
+    public void putMetricsForRawPublishing(TimelineMetrics metrics) {
+        rawCacheLock.writeLock().lock();
+        try {
+            rawMetricsCache.put(System.currentTimeMillis(), metrics);
+        } finally {
+            rawCacheLock.writeLock().unlock();
+        }
+    }
+
+    public Map<Long, TimelineMetrics> extractMetricsForRawPublishing() {
+        return extractMetricsFromCacheWithLock(rawMetricsCache, rawCacheLock);
+    }
+
+    /**
+     * Returns a snapshot of the cache contents and clears the cache.
+     * @param cache cache to drain
+     * @param lock lock guarding the cache
+     * @return snapshot of the cached metrics, keyed by insertion timestamp
+     */
+    private Map<Long, TimelineMetrics> extractMetricsFromCacheWithLock(Cache<Long, TimelineMetrics> cache, ReadWriteLock lock) {
+        lock.writeLock().lock();
+        try {
+            Map<Long, TimelineMetrics> metricsMap = new TreeMap<>(cache.asMap());
+            cache.invalidateAll();
+            return metricsMap;
+        } finally {
+            lock.writeLock().unlock();
+        }
+    }
+
+}
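
An illustrative put/drain cycle (hypothetical): sinks post into the holder via
the web service, and the publisher threads periodically drain it:

    TimelineMetricsHolder holder = TimelineMetricsHolder.getInstance(60, 300);
    holder.putMetricsForRawPublishing(new TimelineMetrics());
    Map<Long, TimelineMetrics> drained = holder.extractMetricsForRawPublishing();
    // 'drained' is keyed by insertion timestamp; the cache is empty until the next put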

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/ambari-metrics-monitor
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/ambari-metrics-monitor b/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/ambari-metrics-monitor
index 967e133..9bbb271 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/ambari-metrics-monitor
+++ b/ambari-metrics/ambari-metrics-host-monitoring/conf/unix/ambari-metrics-monitor
@@ -24,7 +24,7 @@ METRIC_MONITOR_PY_SCRIPT=${RESOURCE_MONITORING_DIR}/main.py
 PIDFILE=/var/run/ambari-metrics-monitor/ambari-metrics-monitor.pid
 OUTFILE=/var/log/ambari-metrics-monitor/ambari-metrics-monitor.out
 
-STOP_TIMEOUT=5
+STOP_TIMEOUT=10
 
 OK=0
 NOTOK=1

http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/aggregator.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/aggregator.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/aggregator.py
new file mode 100644
index 0000000..2249e53
--- /dev/null
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/aggregator.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import threading
+import subprocess
+import logging
+import urllib2
+
+logger = logging.getLogger()
+class Aggregator(threading.Thread):
+  def __init__(self, config, stop_handler):
+    threading.Thread.__init__(self)
+    self._config = config
+    self._stop_handler = stop_handler
+    self._aggregator_process = None
+    self._sleep_interval = config.get_collector_sleep_interval()
+    self.stopped = False
+
+  def run(self):
+    java_home = self._config.get_java_home()
+    collector_hosts = self._config.get_metrics_collector_hosts_as_string()
+    jvm_args = self._config.get_aggregator_jvm_args()
+    config_dir = self._config.get_config_dir()
+    class_name = "org.apache.hadoop.metrics2.host.aggregator.AggregatorApplication"
+    ams_log_file = "ambari-metrics-aggregator.log"
+    additional_classpath = ':{0}'.format(config_dir)
+    ams_log_dir = self._config.ams_monitor_log_dir()
+    logger.info('Starting Aggregator thread.')
+    cmd = "{0}/bin/java {1} -Dams.log.dir={2} -Dams.log.file={3} -cp /var/lib/ambari-metrics-monitor/lib/*{4} {5} {6}"\
+      .format(java_home, jvm_args, ams_log_dir, ams_log_file, additional_classpath, class_name, collector_hosts)
+
+    logger.info("Executing : {0}".format(cmd))
+
+    self._aggregator_process = subprocess.Popen([cmd], stdout = None, stderr = None, shell = True)
+    while not self.stopped:
+      if 0 == self._stop_handler.wait(self._sleep_interval):
+        break
+    pass
+    self.stop()
+
+  def stop(self):
+    self.stopped = True
+    if self._aggregator_process :
+      logger.info('Stopping Aggregator thread.')
+      self._aggregator_process.terminate()
+
+class AggregatorWatchdog(threading.Thread):
+  SLEEP_TIME = 30
+  CONNECTION_TIMEOUT = 5
+  AMS_AGGREGATOR_METRICS_CHECK_URL = "/ws/v1/timeline/metrics/"
+  def __init__(self, config, stop_handler):
+    threading.Thread.__init__(self)
+    self._config = config
+    self._stop_handler = stop_handler
+    self.URL = 'http://localhost:' + self._config.get_inmemory_aggregation_port() + self.AMS_AGGREGATOR_METRICS_CHECK_URL
+    self._is_ok = threading.Event()
+    self.set_is_ok(True)
+    self.stopped = False
+
+  def run(self):
+    logger.info('Starting Aggregator Watchdog thread.')
+    while not self.stopped:
+      if 0 == self._stop_handler.wait(self.SLEEP_TIME):
+        break
+      try:
+        conn = urllib2.urlopen(self.URL, timeout=self.CONNECTION_TIMEOUT)
+      except (KeyboardInterrupt, SystemExit):
+        raise
+      except Exception:
+        self.set_is_ok(False)
+        continue
+      # Healthy only on HTTP 200; close the connection either way so the
+      # socket is not leaked on error responses.
+      self.set_is_ok(conn.code == 200)
+      conn.close()
+
+  def is_ok(self):
+    return self._is_ok.is_set()
+
+  def set_is_ok(self, value):
+    if not value:
+      # Warn only on the healthy -> unhealthy transition, then stay cleared
+      # until a successful check sets the event again.
+      if self.is_ok():
+        logger.warning("Watcher couldn't connect to aggregator.")
+      self._is_ok.clear()
+    else:
+      self._is_ok.set()
+
+
+  def stop(self):
+    logger.info('Stopping watcher thread.')
+    self.stopped = True
+
+

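A rough usage sketch of the two threads above. This wiring is illustrative only, not the monitor's actual startup path; Configuration and bind_signal_handlers are assumed to behave as in the surrounding monitor code:

# Hypothetical wiring, for illustration only.
from core.config_reader import Configuration
from core.stop_handler import bind_signal_handlers  # assumed helper
from core.aggregator import Aggregator, AggregatorWatchdog

config = Configuration()
stop_handler = bind_signal_handlers()

if config.is_inmemory_aggregation_enabled():
    aggregator = Aggregator(config, stop_handler)
    watchdog = AggregatorWatchdog(config, stop_handler)
    aggregator.start()
    watchdog.start()
    # A supervising loop could restart the JVM child process whenever
    # watchdog.is_ok() reports False.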
http://git-wip-us.apache.org/repos/asf/ambari/blob/041d353b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
index 2670e76..d1429ed 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
@@ -30,6 +30,8 @@ from ambari_commons.os_family_impl import OsFamilyImpl
 # Abstraction for OS-dependent configuration defaults
 #
 class ConfigDefaults(object):
+  def get_config_dir(self):
+    pass
   def get_config_file_path(self):
     pass
   def get_metric_file_path(self):
@@ -40,11 +42,14 @@ class ConfigDefaults(object):
 @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
 class ConfigDefaultsWindows(ConfigDefaults):
   def __init__(self):
+    self._CONFIG_DIR = "conf"
     self._CONFIG_FILE_PATH = "conf\\metric_monitor.ini"
     self._METRIC_FILE_PATH = "conf\\metric_groups.conf"
     self._METRIC_FILE_PATH = "conf\\ca.pem"
     pass
 
+  def get_config_dir(self):
+    return self._CONFIG_DIR
   def get_config_file_path(self):
     return self._CONFIG_FILE_PATH
   def get_metric_file_path(self):
@@ -55,11 +60,13 @@ class ConfigDefaultsWindows(ConfigDefaults):
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class ConfigDefaultsLinux(ConfigDefaults):
   def __init__(self):
+    self._CONFIG_DIR = "/etc/ambari-metrics-monitor/conf/"
     self._CONFIG_FILE_PATH = "/etc/ambari-metrics-monitor/conf/metric_monitor.ini"
     self._METRIC_FILE_PATH = "/etc/ambari-metrics-monitor/conf/metric_groups.conf"
     self._CA_CERTS_FILE_PATH = "/etc/ambari-metrics-monitor/conf/ca.pem"
     pass
-
+  def get_config_dir(self):
+    return self._CONFIG_DIR
   def get_config_file_path(self):
     return self._CONFIG_FILE_PATH
   def get_metric_file_path(self):
@@ -71,6 +78,7 @@ configDefaults = ConfigDefaults()
 
 config = ConfigParser.RawConfigParser()
 
+CONFIG_DIR = configDefaults.get_config_dir()
 CONFIG_FILE_PATH = configDefaults.get_config_file_path()
 METRIC_FILE_PATH = configDefaults.get_metric_file_path()
 CA_CERTS_FILE_PATH = configDefaults.get_ca_certs_file_path()
@@ -191,6 +199,8 @@ class Configuration:
         # No hostname script identified in the ambari agent conf
         pass
     pass
+  def get_config_dir(self):
+    return CONFIG_DIR
 
   def getConfig(self):
     return self.config
@@ -214,10 +224,14 @@ class Configuration:
   def get_hostname_config(self):
     return self.get("default", "hostname", None)
 
-  def get_metrics_collector_hosts(self):
+  def get_metrics_collector_hosts_as_list(self):
     hosts = self.get("default", "metrics_servers", "localhost")
     return hosts.split(",")
 
+  def get_metrics_collector_hosts_as_string(self):
+    return self.get("default", "metrics_servers", "localhost")
+
   def get_failover_strategy(self):
     return self.get("collector", "failover_strategy", ROUND_ROBIN_FAILOVER_STRATEGY)
 
@@ -239,6 +253,23 @@ class Configuration:
   def is_server_https_enabled(self):
     return "true" == str(self.get("collector", "https_enabled")).lower()
 
+  def get_java_home(self):
+    return self.get("aggregation", "java_home")
+
+  def is_inmemory_aggregation_enabled(self):
+    return "true" == str(self.get("aggregation", "host_in_memory_aggregation")).lower()
+
+  def get_inmemory_aggregation_port(self):
+    return self.get("aggregation", "host_in_memory_aggregation_port")
+
+  def get_aggregator_jvm_args(self):
+    return self.get("aggregation", "jvm_arguments", "-Xmx256m -Xms128m -XX:PermSize=68m")
+
+  def ams_monitor_log_dir(self):
+    return self.get("aggregation", "ams_monitor_log_dir", "/var/log/ambari-metrics-monitor")
+
   def is_set_instanceid(self):
     return "true" == str(self.get("default", "set.instanceId", 'false')).lower()
 

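The new getters read an [aggregation] section of metric_monitor.ini. A self-contained sketch that exercises those keys through ConfigParser; the sample values are invented for illustration:

# Illustrative only: the real values come from Ambari's agent setup.
import ConfigParser
import StringIO

sample = """
[aggregation]
java_home = /usr/jdk64/jdk1.8.0_112
host_in_memory_aggregation = true
host_in_memory_aggregation_port = 61888
jvm_arguments = -Xmx256m -Xms128m -XX:PermSize=68m
ams_monitor_log_dir = /var/log/ambari-metrics-monitor
"""

config = ConfigParser.RawConfigParser()
config.readfp(StringIO.StringIO(sample))
print config.get("aggregation", "host_in_memory_aggregation_port")  # 61888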

[07/25] ambari git commit: AMBARI-21027. HDP 3.0 TP - create Service Advisor for Sqoop.(vbrodetsky)

Posted by jl...@apache.org.
AMBARI-21027. HDP 3.0 TP - create Service Advisor for Sqoop.(vbrodetsky)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/81416654
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/81416654
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/81416654

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 81416654126fe044d1d36af87f40d5a4151f4bd0
Parents: 7799fb7
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Wed May 17 00:00:18 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Wed May 17 00:00:18 2017 +0300

----------------------------------------------------------------------
 .../SQOOP/1.4.4.3.0/service_advisor.py          | 197 +++++++++++++++++++
 1 file changed, 197 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/81416654/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/service_advisor.py
new file mode 100644
index 0000000..115ca06
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.3.0/service_advisor.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+# Python imports
+import imp
+import os
+import traceback
+import re
+import socket
+import fnmatch
+
+
+from resource_management.core.logger import Logger
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../stacks/')
+PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
+
+try:
+  with open(PARENT_FILE, 'rb') as fp:
+    service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
+except Exception as e:
+  traceback.print_exc()
+  print "Failed to load parent"
+
+class SqoopServiceAdvisor(service_advisor.ServiceAdvisor):
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(SqoopServiceAdvisor, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    # Always call these methods
+    self.modifyMastersWithMultipleInstances()
+    self.modifyCardinalitiesDict()
+    self.modifyHeapSizeProperties()
+    self.modifyNotValuableComponents()
+    self.modifyComponentsNotPreferableOnServer()
+    self.modifyComponentLayoutSchemes()
+
+  def modifyMastersWithMultipleInstances(self):
+    """
+    Modify the set of masters with multiple instances.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyCardinalitiesDict(self):
+    """
+    Modify the dictionary of cardinalities.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyHeapSizeProperties(self):
+    """
+    Modify the dictionary of heap size properties.
+    Must be overridden in child class.
+    """
+    pass
+
+  def modifyNotValuableComponents(self):
+    """
+    Modify the set of components whose host assignment is based on other services.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentsNotPreferableOnServer(self):
+    """
+    Modify the set of components that are not preferable on the server.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentLayoutSchemes(self):
+    """
+    Modify layout scheme dictionaries for components.
+    The scheme dictionary basically maps the number of hosts to
+    host index where component should exist.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def getServiceComponentLayoutValidations(self, services, hosts):
+    """
+    Get a list of errors.
+    Must be overridden in child class.
+    """
+
+    return []
+
+  def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
+    """
+    Entry point.
+    Must be overridden in child class.
+    """
+    #Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    recommender = SqoopRecommender()
+    recommender.recommendSqoopConfigurationsFromHDP23(configurations, clusterData, services, hosts)
+
+
+
+  def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
+    """
+    Entry point.
+    Validate configurations for the service. Return a list of errors.
+    The code for this function should be the same for each Service Advisor.
+    """
+    #Logger.info("Class: %s, Method: %s. Validating Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    validator = SqoopValidator()
+    # Calls the methods of the validator using arguments,
+    # method(siteProperties, siteRecommendations, configurations, services, hosts)
+    return validator.validateListOfConfigUsingMethod(configurations, recommendedDefaults, services, hosts, validator.validators)
+
+
+
+class SqoopRecommender(service_advisor.ServiceAdvisor):
+  """
+  Sqoop Recommender suggests properties when adding the service for the first time or modifying configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(SqoopRecommender, self)
+    self.as_super.__init__(*args, **kwargs)
+
+
+  def recommendSqoopConfigurationsFromHDP23(self, configurations, clusterData, services, hosts):
+    putSqoopSiteProperty = self.putProperty(configurations, "sqoop-site", services)
+    putSqoopEnvProperty = self.putProperty(configurations, "sqoop-env", services)
+
+    enable_external_atlas_for_sqoop = False
+    enable_atlas_hook = False
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if 'sqoop-atlas-application.properties' in services['configurations'] and 'enable.external.atlas.for.sqoop' in services['configurations']['sqoop-atlas-application.properties']['properties']:
+      enable_external_atlas_for_sqoop = services['configurations']['sqoop-atlas-application.properties']['properties']['enable.external.atlas.for.sqoop'].lower() == "true"
+
+    if "ATLAS" in servicesList:
+      putSqoopEnvProperty("sqoop.atlas.hook", "true")
+    elif enable_external_atlas_for_sqoop:
+      putSqoopEnvProperty("sqoop.atlas.hook", "true")
+    else:
+      putSqoopEnvProperty("sqoop.atlas.hook", "false")
+
+    if 'sqoop-env' in configurations and 'sqoop.atlas.hook' in configurations['sqoop-env']['properties']:
+      enable_atlas_hook = configurations['sqoop-env']['properties']['sqoop.atlas.hook'] == "true"
+    elif 'sqoop-env' in services['configurations'] and 'sqoop.atlas.hook' in services['configurations']['sqoop-env']['properties']:
+      enable_atlas_hook = services['configurations']['sqoop-env']['properties']['sqoop.atlas.hook'] == "true"
+
+    if enable_atlas_hook:
+      putSqoopSiteProperty('sqoop.job.data.publish.class', 'org.apache.atlas.sqoop.hook.SqoopHook')
+    else:
+      putSqoopSitePropertyAttribute = self.putPropertyAttribute(configurations, "sqoop-site")
+      putSqoopSitePropertyAttribute('sqoop.job.data.publish.class', 'delete', 'true')
+
+
+
+class SqoopValidator(service_advisor.ServiceAdvisor):
+  """
+  Sqoop Validator checks the correctness of properties whenever the service is first added or the user attempts to
+  change configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(SqoopValidator, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    self.validators = []
+
+
+
+
+

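The recommender's one substantive decision is whether to enable the Atlas hook. Distilled into a standalone function (hypothetical, for illustration), the rule and its edge cases read:

def should_enable_sqoop_atlas_hook(services_list, external_atlas_enabled):
    # Enabled when ATLAS is a cluster service, or when an external Atlas
    # instance was explicitly enabled for Sqoop via
    # enable.external.atlas.for.sqoop.
    return "ATLAS" in services_list or external_atlas_enabled

assert should_enable_sqoop_atlas_hook(["SQOOP", "ATLAS"], False)
assert should_enable_sqoop_atlas_hook(["SQOOP"], True)
assert not should_enable_sqoop_atlas_hook(["SQOOP"], False)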

[18/25] ambari git commit: AMBARI-20532 Ambari-server CLI to setup Database Options Broken (dsen)

Posted by jl...@apache.org.
AMBARI-20532 Ambari-server CLI to setup Database Options Broken (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/772be786
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/772be786
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/772be786

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 772be786d930322e4a95e4755c36ffece24d30e4
Parents: 735c413
Author: Dmytro Sen <ds...@apache.org>
Authored: Wed May 17 19:07:44 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Wed May 17 19:07:44 2017 +0300

----------------------------------------------------------------------
 ambari-server/src/main/python/ambari-server.py  | 299 ++++++++------
 .../main/python/ambari_server/setupMpacks.py    |   7 +-
 .../src/test/python/TestAmbariServer.py         | 409 ++++++++++---------
 3 files changed, 389 insertions(+), 326 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/772be786/ambari-server/src/main/python/ambari-server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari-server.py b/ambari-server/src/main/python/ambari-server.py
index 737be6a..4f680cb 100755
--- a/ambari-server/src/main/python/ambari-server.py
+++ b/ambari-server/src/main/python/ambari-server.py
@@ -367,7 +367,7 @@ def print_action_arguments_help(action):
             ";".join([print_opt for print_opt, _ in optional_options]))
 
 @OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
-def init_parser_options(parser):
+def init_action_parser(action, parser):
   parser.add_option('-k', '--service-user-name', dest="svc_user",
                     default=None,
                     help="User account under which the Ambari Server service will run")
@@ -455,31 +455,58 @@ def init_parser_options(parser):
   # -h reserved for help
 
 @OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
-def init_parser_options(parser):
-  parser.add_option('-f', '--init-script-file', default=None,
-                    help="File with setup script")
-  parser.add_option('-r', '--drop-script-file', default=None,
-                    help="File with drop script")
-  parser.add_option('-u', '--upgrade-script-file', default=AmbariPath.get("/var/lib/"
-                                                           "ambari-server/resources/upgrade/ddl/"
-                                                           "Ambari-DDL-Postgres-UPGRADE-1.3.0.sql"),
-                    help="File with upgrade script")
-  parser.add_option('-t', '--upgrade-stack-script-file', default=AmbariPath.get("/var/lib/"
-                                                                 "ambari-server/resources/upgrade/dml/"
-                                                                 "Ambari-DML-Postgres-UPGRADE_STACK.sql"),
-                    help="File with stack upgrade script")
-  parser.add_option('-j', '--java-home', default=None,
-                    help="Use specified java_home.  Must be valid on all hosts")
-  parser.add_option("-v", "--verbose",
-                    action="store_true", dest="verbose", default=False,
-                    help="Print verbose status messages")
-  parser.add_option("-s", "--silent",
-                    action="store_true", dest="silent", default=False,
-                    help="Silently accepts default prompt values. For db-cleanup command, silent mode will stop ambari server.")
+def init_setup_parser_options(parser):
+  database_group = optparse.OptionGroup(parser, 'Database options (the command must include all of them)')
+  database_group.add_option('--database', default=None, help="Database to use embedded|oracle|mysql|mssql|postgres|sqlanywhere", dest="dbms")
+  database_group.add_option('--databasehost', default=None, help="Hostname of database server", dest="database_host")
+  database_group.add_option('--databaseport', default=None, help="Database port", dest="database_port")
+  database_group.add_option('--databasename', default=None, help="Database/Service name or ServiceID",
+                            dest="database_name")
+  database_group.add_option('--databaseusername', default=None, help="Database user login", dest="database_username")
+  database_group.add_option('--databasepassword', default=None, help="Database user password", dest="database_password")
+  parser.add_option_group(database_group)
+
+  jdbc_group = optparse.OptionGroup(parser, 'JDBC options (the command must include all of them)')
+  jdbc_group.add_option('--jdbc-driver', default=None, help="Specifies the path to the JDBC driver JAR file, or to an archive " \
+                                                            "with all required files (JDBC jar, libraries, etc.), for the " \
+                                                            "database type specified with the --jdbc-db option. " \
+                                                            "Used only with the --jdbc-db option. An archive is supported only " \
+                                                            "for the sqlanywhere database.",
+                        dest="jdbc_driver")
+  jdbc_group.add_option('--jdbc-db', default=None, help="Specifies the database type [postgres|mysql|mssql|oracle|hsqldb|sqlanywhere] for the " \
+                                                        "JDBC driver specified with the --jdbc-driver option. Used only with --jdbc-driver option.",
+                        dest="jdbc_db")
+  parser.add_option_group(jdbc_group)
+
+  other_group = optparse.OptionGroup(parser, 'Other options')
+
+  other_group.add_option('-j', '--java-home', default=None,
+                         help="Use specified java_home.  Must be valid on all hosts")
+  other_group.add_option('--skip-view-extraction', action="store_true", default=False, help="Skip extraction of system views", dest="skip_view_extraction")
+  other_group.add_option('--postgresschema', default=None, help="Postgres database schema name",
+                         dest="postgres_schema")
+  other_group.add_option('--sqla-server-name', default=None, help="SQL Anywhere server name", dest="sqla_server_name")
+  other_group.add_option('--sidorsname', default="sname", help="Oracle database identifier type, Service ID/Service "
+                                                               "Name sid|sname", dest="sid_or_sname")
+
+  parser.add_option_group(other_group)
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_start_parser_options(parser):
   parser.add_option('-g', '--debug', action="store_true", dest='debug', default=False,
                     help="Start ambari-server in debug mode")
   parser.add_option('-y', '--suspend-start', action="store_true", dest='suspend_start', default=False,
                     help="Freeze ambari-server Java process at startup in debug mode")
+  parser.add_option('--skip-properties-validation', action="store_true", default=False, help="Skip properties file validation", dest="skip_properties_validation")
+  parser.add_option('--skip-database-check', action="store_true", default=False, help="Skip database consistency check", dest="skip_database_check")
+  parser.add_option('--auto-fix-database', action="store_true", default=False, help="Automatically fix database consistency issues", dest="fix_database_consistency")
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_empty_parser_options(parser):
+  pass
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_ldap_sync_parser_options(parser):
   parser.add_option('--all', action="store_true", default=False, help="LDAP sync all option.  Synchronize all LDAP users and groups.",
                     dest="ldap_sync_all")
   parser.add_option('--existing', action="store_true", default=False,
@@ -488,79 +515,11 @@ def init_parser_options(parser):
                     dest="ldap_sync_users")
   parser.add_option('--groups', default=None, help="LDAP sync groups option.  Specifies the path to a CSV file of group names to be synchronized.",
                     dest="ldap_sync_groups")
-  parser.add_option('--database', default=None, help="Database to use embedded|oracle|mysql|mssql|postgres|sqlanywhere", dest="dbms")
-  parser.add_option('--databasehost', default=None, help="Hostname of database server", dest="database_host")
-  parser.add_option('--databaseport', default=None, help="Database port", dest="database_port")
-  parser.add_option('--databasename', default=None, help="Database/Service name or ServiceID",
-                    dest="database_name")
-  parser.add_option('--postgresschema', default=None, help="Postgres database schema name",
-                    dest="postgres_schema")
-  parser.add_option('--databaseusername', default=None, help="Database user login", dest="database_username")
-  parser.add_option('--databasepassword', default=None, help="Database user password", dest="database_password")
-  parser.add_option('--sidorsname', default="sname", help="Oracle database identifier type, Service ID/Service "
-                                                          "Name sid|sname", dest="sid_or_sname")
-  parser.add_option('--sqla-server-name', default=None, help="SQL Anywhere server name", dest="sqla_server_name")
-  parser.add_option('--jdbc-driver', default=None, help="Specifies the path to the JDBC driver JAR file or archive " \
-                                                        "with all required files(jdbc jar, libraries and etc), for the " \
-                                                        "database type specified with the --jdbc-db option. " \
-                                                        "Used only with --jdbc-db option. Archive is supported only for" \
-                                                        " sqlanywhere database." ,
-                    dest="jdbc_driver")
-  parser.add_option('--jdbc-db', default=None, help="Specifies the database type [postgres|mysql|mssql|oracle|hsqldb|sqlanywhere] for the " \
-                                                    "JDBC driver specified with the --jdbc-driver option. Used only with --jdbc-driver option.",
-                    dest="jdbc_db")
-  parser.add_option('--cluster-name', default=None, help="Cluster name", dest="cluster_name")
-  parser.add_option('--version-display-name', default=None, help="Display name of desired repo version", dest="desired_repo_version")
-  parser.add_option('--skip-properties-validation', action="store_true", default=False, help="Skip properties file validation", dest="skip_properties_validation")
-  parser.add_option('--skip-database-check', action="store_true", default=False, help="Skip database consistency check", dest="skip_database_check")
-  parser.add_option('--skip-view-extraction', action="store_true", default=False, help="Skip extraction of system views", dest="skip_view_extraction")
-  parser.add_option('--auto-fix-database', action="store_true", default=False, help="Automatically fix database consistency issues", dest="fix_database_consistency")
-  parser.add_option('--force-version', action="store_true", default=False, help="Force version to current", dest="force_repo_version")
-  parser.add_option('--version', dest="stack_versions", default=None, action="append", type="string",
-                    help="Specify stack version that needs to be enabled. All other stacks versions will be disabled")
-  parser.add_option('--stack', dest="stack_name", default=None, type="string",
-                    help="Specify stack name for the stack versions that needs to be enabled")
-  parser.add_option("-d", "--from-date", dest="cleanup_from_date", default=None, type="string", help="Specify date for the cleanup process in 'yyyy-MM-dd' format")
-  add_parser_options('--mpack',
-      default=None,
-      help="Specify the path for management pack to be installed/upgraded",
-      dest="mpack_path",
-      parser=parser,
-      required_for_actions=[INSTALL_MPACK_ACTION, UPGRADE_MPACK_ACTION]
-  )
-  add_parser_options('--mpack-name',
-      default=None,
-      help="Specify the management pack name to be uninstalled",
-      dest="mpack_name",
-      parser=parser,
-      required_for_actions=[UNINSTALL_MPACK_ACTION]
-  )
-  add_parser_options('--purge',
-      action="store_true",
-      default=False,
-      help="Purge existing resources specified in purge-list",
-      dest="purge",
-      parser=parser,
-      optional_for_actions=[INSTALL_MPACK_ACTION]
-  )
-  purge_resources = ",".join([STACK_DEFINITIONS_RESOURCE_NAME, SERVICE_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME])
-  default_purge_resources = ",".join([STACK_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME])
-  add_parser_options('--purge-list',
-      default=default_purge_resources,
-      help="Comma separated list of resources to purge ({0}). By default ({1}) will be purged.".format(purge_resources, default_purge_resources),
-      dest="purge_list",
-      parser=parser,
-      optional_for_actions=[INSTALL_MPACK_ACTION]
-  )
-  add_parser_options('--force',
-      action="store_true",
-      default=False,
-      help="Force install management pack",
-      dest="force",
-      parser=parser,
-      optional_for_actions=[INSTALL_MPACK_ACTION]
-  )
+  parser.add_option('--ldap-sync-admin-name', default=None, help="Username for LDAP sync", dest="ldap_sync_admin_name")
+  parser.add_option('--ldap-sync-admin-password', default=None, help="Password for LDAP sync", dest="ldap_sync_admin_password")
 
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_ldap_setup_parser_options(parser):
   parser.add_option('--ldap-url', default=None, help="Primary url for LDAP", dest="ldap_url")
   parser.add_option('--ldap-secondary-url', default=None, help="Secondary url for LDAP", dest="ldap_secondary_url")
   parser.add_option('--ldap-ssl', default=None, help="Use SSL [true/false] for LDAP", dest="ldap_ssl")
@@ -576,29 +535,83 @@ def init_parser_options(parser):
   parser.add_option('--ldap-save-settings', action="store_true", default=None, help="Save without review for LDAP", dest="ldap_save_settings")
   parser.add_option('--ldap-referral', default=None, help="Referral method [follow/ignore] for LDAP", dest="ldap_referral")
   parser.add_option('--ldap-bind-anonym', default=None, help="Bind anonymously [true/false] for LDAP", dest="ldap_bind_anonym")
-  parser.add_option('--ldap-sync-admin-name', default=None, help="Username for LDAP sync", dest="ldap_sync_admin_name")
-  parser.add_option('--ldap-sync-admin-password', default=None, help="Password for LDAP sync", dest="ldap_sync_admin_password")
   parser.add_option('--ldap-sync-username-collisions-behavior', default=None, help="Handling behavior for username collisions [convert/skip] for LDAP sync", dest="ldap_sync_username_collisions_behavior")
 
-  parser.add_option('--truststore-type', default=None, help="Type of TrustStore (jks|jceks|pkcs12)", dest="trust_store_type")
-  parser.add_option('--truststore-path', default=None, help="Path of TrustStore", dest="trust_store_path")
-  parser.add_option('--truststore-password', default=None, help="Password for TrustStore", dest="trust_store_password")
-  parser.add_option('--truststore-reconfigure', action="store_true", default=None, help="Force to reconfigure TrustStore if exits", dest="trust_store_reconfigure")
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_set_current_parser_options(parser):
+  parser.add_option('--cluster-name', default=None, help="Cluster name", dest="cluster_name")
+  parser.add_option('--version-display-name', default=None, help="Display name of desired repo version", dest="desired_repo_version")
+  parser.add_option('--force-version', action="store_true", default=False, help="Force version to current", dest="force_repo_version")
 
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_setup_security_parser_options(parser):
   parser.add_option('--security-option', default=None,
                     help="Setup security option (setup-https|encrypt-password|setup-kerberos-jaas|setup-truststore|import-certificate)",
                     dest="security_option")
-  parser.add_option('--api-ssl', default=None, help="Enable SSL for Ambari API [true/false]", dest="api_ssl")
-  parser.add_option('--api-ssl-port', default=None, help="Client API SSL port", dest="api_ssl_port")
-  parser.add_option('--import-cert-path', default=None, help="Path to Certificate (import)", dest="import_cert_path")
-  parser.add_option('--import-cert-alias', default=None, help="Alias for the imported certificate", dest="import_cert_alias")
-  parser.add_option('--import-key-path', default=None, help="Path to Private Key (import)", dest="import_key_path")
-  parser.add_option('--pem-password', default=None, help="Password for Private Key", dest="pem_password")
-  parser.add_option('--master-key', default=None, help="Master key for encrypting passwords", dest="master_key")
-  parser.add_option('--master-key-persist', default=None, help="Persist master key [true/false]", dest="master_key_persist")
-  parser.add_option('--jaas-principal', default=None, help="Kerberos principal for ambari server", dest="jaas_principal")
-  parser.add_option('--jaas-keytab', default=None, help="Keytab path for Kerberos principal", dest="jaas_keytab")
 
+  https_group = optparse.OptionGroup(parser, "setup-https options")
+  https_group.add_option('--api-ssl', default=None, help="Enable SSL for Ambari API [true/false]", dest="api_ssl")
+  https_group.add_option('--api-ssl-port', default=None, help="Client API SSL port", dest="api_ssl_port")
+  https_group.add_option('--import-key-path', default=None, help="Path to Private Key (import)", dest="import_key_path")
+  https_group.add_option('--pem-password', default=None, help="Password for Private Key", dest="pem_password")
+  parser.add_option_group(https_group)
+
+  encrypt_passwords_group = optparse.OptionGroup(parser, "encrypt-passwords options")
+  encrypt_passwords_group.add_option('--master-key', default=None, help="Master key for encrypting passwords", dest="master_key")
+  encrypt_passwords_group.add_option('--master-key-persist', default=None, help="Persist master key [true/false]", dest="master_key_persist")
+  parser.add_option_group(encrypt_passwords_group)
+
+  setup_kerberos_jaas_group = optparse.OptionGroup(parser, "setup-kerberos-jaas options")
+  setup_kerberos_jaas_group.add_option('--jaas-principal', default=None, help="Kerberos principal for ambari server", dest="jaas_principal")
+  setup_kerberos_jaas_group.add_option('--jaas-keytab', default=None, help="Keytab path for Kerberos principal", dest="jaas_keytab")
+  parser.add_option_group(setup_kerberos_jaas_group)
+
+  setup_truststore_group = optparse.OptionGroup(parser, "setup-truststore options, uses encrypt-passwords options if configured")
+  setup_truststore_group.add_option('--truststore-type', default=None, help="Type of TrustStore (jks|jceks|pkcs12)", dest="trust_store_type")
+  setup_truststore_group.add_option('--truststore-path', default=None, help="Path of TrustStore", dest="trust_store_path")
+  setup_truststore_group.add_option('--truststore-password', default=None, help="Password for TrustStore", dest="trust_store_password")
+  setup_truststore_group.add_option('--truststore-reconfigure', action="store_true", default=None, help="Force reconfiguration of the TrustStore if it already exists", dest="trust_store_reconfigure")
+  parser.add_option_group(setup_truststore_group)
+
+  import_certificate_group = optparse.OptionGroup(parser, "import-certificate options, uses --truststore-path option")
+  import_certificate_group.add_option('--import-cert-path', default=None, help="Path to Certificate (import)", dest="import_cert_path")
+  import_certificate_group.add_option('--import-cert-alias', default=None, help="Alias for the imported certificate", dest="import_cert_alias")
+  parser.add_option_group(import_certificate_group)
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_enable_stack_parser_options(parser):
+  parser.add_option('--version', dest="stack_versions", default=None, action="append", type="string",
+                    help="Specify stack version that needs to be enabled. All other stacks versions will be disabled")
+  parser.add_option('--stack', dest="stack_name", default=None, type="string",
+                    help="Specify stack name for the stack versions that needs to be enabled")
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_db_cleanup_parser_options(parser):
+  parser.add_option('--cluster-name', default=None, help="Cluster name", dest="cluster_name")
+  parser.add_option("-d", "--from-date", dest="cleanup_from_date", default=None, type="string", help="Specify date for the cleanup process in 'yyyy-MM-dd' format")
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_install_mpack_parser_options(parser):
+  parser.add_option('--mpack', default=None, help="Specify the path for management pack to be installed", dest="mpack_path")
+  parser.add_option('--purge', action="store_true", default=False, help="Purge existing resources specified in purge-list", dest="purge")
+  purge_resources = ",".join([STACK_DEFINITIONS_RESOURCE_NAME, SERVICE_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME])
+  default_purge_resources = ",".join([STACK_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME])
+
+  parser.add_option('--purge-list', default=default_purge_resources,
+                    help="Comma separated list of resources to purge ({0}). By default ({1}) will be purged.".format(purge_resources, default_purge_resources),
+                    dest="purge_list")
+  parser.add_option('--force', action="store_true", default=False, help="Force install management pack", dest="force")
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_uninstall_mpack_parser_options(parser):
+  parser.add_option('--mpack-name', default=None, help="Specify the management pack name to be uninstalled", dest="mpack_name")
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_upgrade_mpack_parser_options(parser):
+  parser.add_option('--mpack', default=None, help="Specify the path for management pack to be updated", dest="mpack_path")
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_kerberos_setup_parser_options(parser):
   parser.add_option('--kerberos-setup', default=None, help="Setup Kerberos Authentication", dest="kerberos_setup")
   parser.add_option('--kerberos-enabled', default=False, help="Kerberos enabled", dest="kerberos_enabled")
   parser.add_option('--kerberos-spnego-principal', default="HTTP/_HOST", help="Kerberos SPNEGO principal", dest="kerberos_spnego_principal")
@@ -774,6 +787,46 @@ def create_user_action_map(args, options):
       }
   return action_map
 
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def init_action_parser(action, parser):
+  action_parser_map = {
+    SETUP_ACTION: init_setup_parser_options,
+    SETUP_JCE_ACTION: init_empty_parser_options,
+    START_ACTION: init_start_parser_options,
+    STOP_ACTION: init_empty_parser_options,
+    RESTART_ACTION: init_start_parser_options,
+    RESET_ACTION: init_empty_parser_options,
+    STATUS_ACTION: init_empty_parser_options,
+    UPGRADE_ACTION: init_empty_parser_options,
+    UPGRADE_STACK_ACTION: init_empty_parser_options,
+    LDAP_SETUP_ACTION: init_ldap_setup_parser_options,
+    LDAP_SYNC_ACTION: init_ldap_sync_parser_options,
+    SET_CURRENT_ACTION: init_set_current_parser_options,
+    SETUP_SECURITY_ACTION: init_setup_security_parser_options,
+    REFRESH_STACK_HASH_ACTION: init_empty_parser_options,
+    BACKUP_ACTION: init_empty_parser_options,
+    RESTORE_ACTION: init_empty_parser_options,
+    UPDATE_HOST_NAMES_ACTION: init_empty_parser_options,
+    CHECK_DATABASE_ACTION: init_empty_parser_options,
+    ENABLE_STACK_ACTION: init_enable_stack_parser_options,
+    SETUP_SSO_ACTION: init_empty_parser_options,
+    DB_CLEANUP_ACTION: init_db_cleanup_parser_options,
+    INSTALL_MPACK_ACTION: init_install_mpack_parser_options,
+    UNINSTALL_MPACK_ACTION: init_uninstall_mpack_parser_options,
+    UPGRADE_MPACK_ACTION: init_upgrade_mpack_parser_options,
+    PAM_SETUP_ACTION: init_empty_parser_options,
+    KERBEROS_SETUP_ACTION: init_kerberos_setup_parser_options,
+  }
+  parser.add_option("-v", "--verbose",
+                    action="store_true", dest="verbose", default=False,
+                    help="Print verbose status messages")
+  parser.add_option("-s", "--silent",
+                    action="store_true", dest="silent", default=False,
+                    help="Silently accepts default prompt values. For db-cleanup command, silent mode will stop ambari server.")
+  try:
+    action_parser_map[action](parser)
+  except KeyError:
+    parser.error("Invalid action: " + action)
 
 def setup_logging(logger, filename, logging_level):
   formatter = logging.Formatter(formatstr)
@@ -825,16 +878,6 @@ def main(options, args, parser):
 
   options.warnings = []
 
-  if are_cmd_line_db_args_blank(options):
-    options.must_set_database_options = True
-  elif not are_cmd_line_db_args_valid(options):
-    parser.error('All database options should be set. Please see help for the options.')
-  else:
-    options.must_set_database_options = False
-
-  #correct database
-  fix_database_options(options, parser)
-
   if len(args) == 0:
     print parser.print_help()
     parser.error("No action entered")
@@ -848,6 +891,17 @@ def main(options, args, parser):
   except KeyError:
     parser.error("Invalid action: " + action)
 
+  if action == SETUP_ACTION:
+    if are_cmd_line_db_args_blank(options):
+      options.must_set_database_options = True
+    elif not are_cmd_line_db_args_valid(options):
+      parser.error('All database options should be set. Please see help for the options.')
+    else:
+      options.must_set_database_options = False
+
+    #correct database
+    fix_database_options(options, parser)
+
   matches = 0
   for args_number_required in action_obj.possible_args_numbers:
     matches += int(len(args) == args_number_required)
@@ -900,8 +954,9 @@ def main(options, args, parser):
     sys.exit(options.exit_code)
 
 def mainBody():
-  parser = optparse.OptionParser(usage="usage: %prog [options] action [stack_id os]",)
-  init_parser_options(parser)
+  parser = optparse.OptionParser(usage="usage: %prog action [options]",)
+  action = sys.argv[1]
+  init_action_parser(action, parser)
   (options, args) = parser.parse_args()
 
   # check if only silent key set

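The refactoring replaces one monolithic option list with per-action parser initializers selected before parsing. A minimal, self-contained sketch of the dispatch pattern (names invented for illustration, not the ambari-server API):

import optparse
import sys

def init_setup_options(parser):
    parser.add_option('--database', dest='dbms', default=None)

def init_start_options(parser):
    parser.add_option('-g', '--debug', action='store_true', default=False)

# Each action registers only the options it understands.
ACTION_PARSERS = {'setup': init_setup_options, 'start': init_start_options}

def main_body(argv):
    parser = optparse.OptionParser(usage='usage: %prog action [options]')
    action = argv[1] if len(argv) > 1 else ''
    try:
        ACTION_PARSERS[action](parser)
    except KeyError:
        parser.error('Invalid action: ' + action)
    options, args = parser.parse_args(argv[1:])
    return action, options

if __name__ == '__main__':
    print main_body(sys.argv)

With this shape, an unknown flag fails fast for the action that does not support it, instead of being silently accepted because some other action declared it.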
http://git-wip-us.apache.org/repos/asf/ambari/blob/772be786/ambari-server/src/main/python/ambari_server/setupMpacks.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/setupMpacks.py b/ambari-server/src/main/python/ambari_server/setupMpacks.py
index aaf9c10..625e428 100755
--- a/ambari-server/src/main/python/ambari_server/setupMpacks.py
+++ b/ambari-server/src/main/python/ambari_server/setupMpacks.py
@@ -714,7 +714,7 @@ def _install_mpack(options, replay_mode=False, is_upgrade=False):
     _execute_hook(mpack_metadata, BEFORE_INSTALL_HOOK_NAME, tmp_root_dir)
 
   # Purge previously installed stacks and management packs
-  if options.purge and options.purge_list:
+  if not is_upgrade and options.purge and options.purge_list:
     purge_resources = options.purge_list.split(",")
     validate_purge(options, purge_resources, tmp_root_dir, mpack_metadata, replay_mode)
     purge_stacks_and_mpacks(purge_resources, replay_mode)
@@ -934,9 +934,6 @@ def upgrade_mpack(options, replay_mode=False):
   """
   logger.info("Upgrade mpack.")
   mpack_path = options.mpack_path
-  if options.purge:
-    print_error_msg("Purge is not supported with upgrade_mpack action!")
-    raise FatalException(-1, "Purge is not supported with upgrade_mpack action!")
 
   if not mpack_path:
     print_error_msg("Management pack not specified!")
@@ -962,7 +959,7 @@ def upgrade_mpack(options, replay_mode=False):
 
   print_info_msg("Management pack {0}-{1} successfully upgraded!".format(mpack_name, mpack_version))
   if not replay_mode:
-    add_replay_log(UPGRADE_MPACK_ACTION, mpack_archive_path, options.purge, options.purge_list, options.force, options.verbose)
+    add_replay_log(UPGRADE_MPACK_ACTION, mpack_archive_path, False, [], options.force, options.verbose)
 
 def replay_mpack_logs():
   """

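The setupMpacks.py change makes purge an install-only concept: upgrades skip the purge step and the replay log now records purge as disabled. A distilled sketch of that rule (hypothetical helper, not the module's API):

def replay_log_entry(action, archive_path, options, is_upgrade):
    # Purge flags are honored on install only; upgrades always replay with
    # purge disabled so stacks are never dropped mid-upgrade.
    purge = options.purge and not is_upgrade
    purge_list = options.purge_list if purge else []
    return (action, archive_path, purge, purge_list)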
http://git-wip-us.apache.org/repos/asf/ambari/blob/772be786/ambari-server/src/test/python/TestAmbariServer.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestAmbariServer.py b/ambari-server/src/test/python/TestAmbariServer.py
index 9579c22..66b5ac5 100644
--- a/ambari-server/src/test/python/TestAmbariServer.py
+++ b/ambari-server/src/test/python/TestAmbariServer.py
@@ -85,7 +85,7 @@ with patch.object(platform, "linux_distribution", return_value = MagicMock(retur
                   print_info_msg, print_warning_msg, print_error_msg
                 from ambari_commons.os_utils import run_os_command, search_file, set_file_permissions, remove_file, copy_file, \
                   is_valid_filepath
-                from ambari_server.dbConfiguration import DBMSConfigFactory, check_jdbc_drivers
+                from ambari_server.dbConfiguration import DBMSConfigFactory, check_jdbc_drivers, DBMSConfig
                 from ambari_server.dbConfiguration_linux import PGConfig, LinuxDBMSConfig, OracleConfig
                 from ambari_server.properties import Properties
                 from ambari_server.resourceFilesKeeper import ResourceFilesKeeper, KeeperException
@@ -302,30 +302,27 @@ class TestAmbariServer(TestCase):
 
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(_ambari_server_, "setup_security")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_setup_security(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
-                                    logger_mock, OptionParserMock,
-                                    setup_security_method):
-    opm = OptionParserMock.return_value
-    options = MagicMock()
-    args = ["setup-security"]
-    opm.parse_args.return_value = (options, args)
-    options.dbms = None
-    options.security_option = "setup-security"
-    options.sid_or_sname = "sid"
-    setup_security_method.return_value = None
+                                    logger_mock, setup_security_method):
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ['ambari-server', 'setup-security', '--security-option=setup-security']
+      setup_security_method.return_value = None
 
-    _ambari_server_.mainBody()
+      _ambari_server_.mainBody()
 
-    _ambari_server_.mainBody()
-    self.assertTrue(setup_security_method.called)
-    self.assertFalse(False, get_verbose())
-    self.assertFalse(False, get_silent())
-    pass
+      _ambari_server_.mainBody()
+      self.assertTrue(setup_security_method.called)
+      self.assertFalse(False, get_verbose())
+      self.assertFalse(False, get_silent())
+    finally:
+      sys.argv = tmp_argv
+  pass
 
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(_ambari_server_, "setup_ambari_krb5_jaas")
@@ -404,148 +401,147 @@ class TestAmbariServer(TestCase):
   @patch.object(_ambari_server_, "start")
   @patch.object(_ambari_server_, "stop")
   @patch.object(_ambari_server_, "reset")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_setup(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
-                           logger_mock, OptionParserMock, reset_method, stop_method,
+                           logger_mock, reset_method, stop_method,
                            start_method, setup_method, exit_mock):
-    opm = OptionParserMock.return_value
-    options = self._create_empty_options_mock()
-    args = ["setup"]
-    opm.parse_args.return_value = (options, args)
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ["ambari-server", "setup"]
 
-    options.dbms = None
-    options.sid_or_sname = "sid"
-    _ambari_server_.mainBody()
+      _ambari_server_.mainBody()
 
-    self.assertTrue(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertFalse(reset_method.called)
+      self.assertTrue(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertFalse(reset_method.called)
 
-    self.assertFalse(False, get_verbose())
-    self.assertFalse(False, get_silent())
+      self.assertFalse(False, get_verbose())
+      self.assertFalse(False, get_silent())
 
-    setup_method.reset_mock()
-    start_method.reset_mock()
-    stop_method.reset_mock()
-    reset_method.reset_mock()
-    exit_mock.reset_mock()
-    args = ["setup", "-v"]
-    options = self._create_empty_options_mock()
-    opm.parse_args.return_value = (options, args)
-    options.dbms = None
-    options.sid_or_sname = "sid"
-    setup_method.side_effect = Exception("Unexpected error")
-    try:
+      setup_method.reset_mock()
+      start_method.reset_mock()
+      stop_method.reset_mock()
+      reset_method.reset_mock()
+      exit_mock.reset_mock()
+      sys.argv = ["ambari-server", "setup", "-v"]
+      setup_method.side_effect = Exception("Unexpected error")
+      try:
+        _ambari_server_.mainBody()
+      except Exception:
+        self.assertTrue(True)
+      self.assertTrue(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertFalse(reset_method.called)
+      self.assertTrue(get_verbose())
+
+      setup_method.reset_mock()
+      start_method.reset_mock()
+      stop_method.reset_mock()
+      reset_method.reset_mock()
+      exit_mock.reset_mock()
+      sys.argv = ["ambari-server", "setup"]
+      setup_method.side_effect = Exception("Unexpected error")
       _ambari_server_.mainBody()
-    except Exception:
-      self.assertTrue(True)
-    self.assertTrue(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertFalse(reset_method.called)
-    self.assertTrue(get_verbose())
-
-    setup_method.reset_mock()
-    start_method.reset_mock()
-    stop_method.reset_mock()
-    reset_method.reset_mock()
-    exit_mock.reset_mock()
-    args = ["setup"]
-    options = self._create_empty_options_mock()
-    opm.parse_args.return_value = (options, args)
-    options.dbms = None
-    options.sid_or_sname = "sid"
-    options.verbose = False
-    setup_method.side_effect = Exception("Unexpected error")
-    _ambari_server_.mainBody()
-    self.assertTrue(exit_mock.called)
-    self.assertTrue(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertFalse(reset_method.called)
-    self.assertFalse(get_verbose())
+      self.assertTrue(exit_mock.called)
+      self.assertTrue(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertFalse(reset_method.called)
+      self.assertFalse(get_verbose())
 
-    pass
+      pass
+    finally:
+      sys.argv = tmp_argv
 
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
-  @patch.object(_ambari_server_, "setup")
-  @patch("optparse.OptionParser")
+  @patch.object(PGConfig, "_setup_local_server")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
-  def test_main_with_preset_dbms(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
-                                 logger_mock, optionParserMock, setup_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    args = ["setup"]
-    opm.parse_args.return_value = (options, args)
+  @patch("ambari_server.serverSetup.check_ambari_user")
+  @patch('ambari_server.serverSetup.download_and_install_jdk')
+  @patch("ambari_server.serverSetup.configure_os_settings")
+  @patch.object(DBMSConfig, "setup_database")
+  @patch("ambari_server.serverSetup.check_jdbc_drivers")
+  @patch("ambari_server.serverSetup.extract_views")
+  @patch("ambari_server.serverSetup.adjust_directory_permissions")
+  @patch("ambari_server.serverSetup.service_setup")
+  def test_main_with_preset_dbms(self, service_setup_mock, adjust_directory_permissions_mock, extract_views_mock, check_jdbc_drivers_mock, setup_database_mock, configure_os_settings_mock, download_and_install_jdk_mock, check_ambari_user_mock, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
+                                 logger_mock, setup_local_db_method):
+    extract_views_mock.return_value = 0
+    check_ambari_user_mock.return_value = (0, False, 'user', None)
+    configure_os_settings_mock.return_value = 0
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ["ambari-server", "setup", "-s"]
 
-    options.dbms = "sqlanywhere"
-    options.sid_or_sname = "sname"
-    _ambari_server_.mainBody()
+      _ambari_server_.mainBody()
 
-    self.assertTrue(setup_method.called)
-    self.assertEquals(options.database_index, 5)
-    pass
+      self.assertTrue(setup_local_db_method.called)
+      pass
+    finally:
+      sys.argv = tmp_argv
 
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(_ambari_server_, "setup")
   @patch.object(_ambari_server_, "fix_database_options")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
-  def test_fix_database_options_called(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock, logger_mock, optionParserMock,
+  def test_fix_database_options_called(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock, logger_mock,
                                        fixDBOptionsMock, setup_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    args = ["setup"]
-    opm.parse_args.return_value = (options, args)
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ['ambari-server', 'setup']
 
-    _ambari_server_.mainBody()
+      _ambari_server_.mainBody()
 
-    self.assertTrue(setup_method.called)
-    self.assertTrue(fixDBOptionsMock.called)
-    set_silent(False)
-    pass
+      self.assertTrue(setup_method.called)
+      self.assertTrue(fixDBOptionsMock.called)
+      set_silent(False)
+      pass
+    finally:
+      sys.argv = tmp_argv
 
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(_ambari_server_, "setup")
   @patch.object(_ambari_server_, "start")
   @patch.object(_ambari_server_, "stop")
   @patch.object(_ambari_server_, "reset")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_start(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock, logger_mock,
-                           optionParserMock, reset_method, stop_method,
+                           reset_method, stop_method,
                            start_method, setup_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    args = ["setup"]
-    opm.parse_args.return_value = (options, args)
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ["ambari-server", "setup"]
 
-    options.dbms = None
-    options.sid_or_sname = "sname"
-    _ambari_server_.mainBody()
+      _ambari_server_.mainBody()
 
-    self.assertTrue(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertFalse(reset_method.called)
+      self.assertTrue(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertFalse(reset_method.called)
 
-    self.assertFalse(False, get_verbose())
-    self.assertFalse(False, get_silent())
-    pass
+      self.assertFalse(get_verbose())
+      self.assertFalse(get_silent())
+      pass
+    finally:
+      sys.argv = tmp_argv
 
   @not_for_platform(PLATFORM_WINDOWS)
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@@ -656,33 +652,32 @@ class TestAmbariServer(TestCase):
   @patch.object(_ambari_server_, "reset")
   @patch.object(_ambari_server_, "backup")
   @patch.object(_ambari_server_, "restore")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_backup(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock, logger_mock,
-                            optionParserMock, restore_mock, backup_mock, reset_method, stop_method,
+                            restore_mock, backup_mock, reset_method, stop_method,
                            start_method, setup_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    args = ["backup"]
-    opm.parse_args.return_value = (options, args)
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ["ambari-server", "backup"]
 
-    options.dbms = None
-    options.sid_or_sname = "sname"
-    _ambari_server_.mainBody()
+      _ambari_server_.mainBody()
 
-    self.assertTrue(backup_mock.called)
-    self.assertFalse(restore_mock.called)
-    self.assertFalse(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertFalse(reset_method.called)
+      self.assertTrue(backup_mock.called)
+      self.assertFalse(restore_mock.called)
+      self.assertFalse(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertFalse(reset_method.called)
 
-    self.assertFalse(False, get_verbose())
-    self.assertFalse(False, get_silent())
-    pass
+      self.assertFalse(get_verbose())
+      self.assertFalse(get_silent())
+      pass
+    finally:
+      sys.argv = tmp_argv
 
   # Restore is not yet supported on Windows
   @not_for_platform(PLATFORM_WINDOWS)
@@ -693,33 +688,31 @@ class TestAmbariServer(TestCase):
   @patch.object(_ambari_server_, "reset")
   @patch.object(_ambari_server_, "backup")
   @patch.object(_ambari_server_, "restore")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_restore(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock, logger_mock,
-                             optionParserMock, restore_mock, backup_mock, reset_method, stop_method,
+                             restore_mock, backup_mock, reset_method, stop_method,
                             start_method, setup_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    args = ["restore"]
-    opm.parse_args.return_value = (options, args)
-
-    options.dbms = None
-    options.sid_or_sname = "sname"
-    _ambari_server_.mainBody()
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ["ambari-server", "restore"]
+      _ambari_server_.mainBody()
 
-    self.assertTrue(restore_mock.called)
-    self.assertFalse(backup_mock.called)
-    self.assertFalse(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertFalse(reset_method.called)
+      self.assertTrue(restore_mock.called)
+      self.assertFalse(backup_mock.called)
+      self.assertFalse(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertFalse(reset_method.called)
 
-    self.assertFalse(False, get_verbose())
-    self.assertFalse(False, get_silent())
-    pass
+      self.assertFalse(get_verbose())
+      self.assertFalse(get_silent())
+      pass
+    finally:
+      sys.argv = tmp_argv
 
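The backup/restore pair above is a dispatch check: for a given action, exactly
one handler mock must be called and every other handler must stay untouched.
A minimal sketch of that assertion style, where the actions dict is a
hypothetical stand-in for mainBody's action dispatch:

import unittest
from unittest.mock import MagicMock

class DispatchTest(unittest.TestCase):
    def test_backup_dispatch(self):
        # Hypothetical action table standing in for mainBody's dispatch.
        actions = {name: MagicMock() for name in ("setup", "backup", "restore")}
        actions["backup"]()  # dispatch only the "backup" action
        self.assertTrue(actions["backup"].called)
        self.assertFalse(actions["setup"].called)
        self.assertFalse(actions["restore"].called)

if __name__ == '__main__':
    unittest.main()
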
   @not_for_platform(PLATFORM_WINDOWS)
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@@ -791,32 +784,30 @@ class TestAmbariServer(TestCase):
   @patch.object(_ambari_server_, "start")
   @patch.object(_ambari_server_, "stop")
   @patch.object(_ambari_server_, "reset")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_reset(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
-                           logger_mock, optionParserMock, reset_method, stop_method,
+                           logger_mock, reset_method, stop_method,
                            start_method, setup_method):
-    opm = optionParserMock.return_value
-
-    options = self._create_empty_options_mock()
-    args = ["reset"]
-    opm.parse_args.return_value = (options, args)
-    options.dbms = None
-    options.sid_or_sname = "sid"
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ["ambari-server", "reset"]
 
-    _ambari_server_.mainBody()
+      _ambari_server_.mainBody()
 
-    self.assertFalse(setup_method.called)
-    self.assertFalse(start_method.called)
-    self.assertFalse(stop_method.called)
-    self.assertTrue(reset_method.called)
+      self.assertFalse(setup_method.called)
+      self.assertFalse(start_method.called)
+      self.assertFalse(stop_method.called)
+      self.assertTrue(reset_method.called)
 
-    self.assertFalse(False, get_verbose())
-    self.assertFalse(False, get_silent())
-    pass
+      self.assertFalse(get_verbose())
+      self.assertFalse(get_silent())
+      pass
+    finally:
+      sys.argv = tmp_argv
 
 
   @not_for_platform(PLATFORM_WINDOWS)
@@ -8497,64 +8488,84 @@ class TestAmbariServer(TestCase):
   @not_for_platform(PLATFORM_WINDOWS)
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(_ambari_server_, "is_server_runing")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_status_running(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
-                                    logger_mock,  optionParserMock, is_server_runing_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    del options.exit_message
+                                    logger_mock, is_server_runing_method):
 
-    args = ["status"]
-    opm.parse_args.return_value = (options, args)
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ["ambari-server", "status"]
 
-    is_server_runing_method.return_value = (True, 100)
+      is_server_runing_method.return_value = (True, 100)
 
-    options.dbms = None
-    options.sid_or_sname = "sid"
 
-    try:
-      _ambari_server_.mainBody()
-    except SystemExit as e:
-      self.assertTrue(e.code == 0)
+      try:
+        _ambari_server_.mainBody()
+      except SystemExit as e:
+        self.assertTrue(e.code == 0)
 
-    self.assertTrue(is_server_runing_method.called)
-    pass
+      self.assertTrue(is_server_runing_method.called)
+      pass
+    finally:
+      sys.argv = tmp_argv
 
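The status tests assert on process exit codes by catching SystemExit around
mainBody(): 0 when the server is running, 3 when it is not. Note that the bare
try/except used above passes silently if no SystemExit is raised at all;
assertRaises makes the expectation explicit. A sketch of the explicit form,
where exit_with_status() is a hypothetical stand-in for the status code path:

import sys
import unittest

def exit_with_status(running):
    # Hypothetical stand-in for the status action: exit 0 when the server
    # is running, 3 when it is not (the codes asserted in the tests above).
    sys.exit(0 if running else 3)

class ExitCodeTest(unittest.TestCase):
    def test_not_running_exits_3(self):
        with self.assertRaises(SystemExit) as ctx:
            exit_with_status(False)
        self.assertEqual(ctx.exception.code, 3)

if __name__ == '__main__':
    unittest.main()
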
 
   @not_for_platform(PLATFORM_WINDOWS)
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(_ambari_server_, "is_server_runing")
-  @patch("optparse.OptionParser")
   @patch.object(_ambari_server_, "logger")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch.object(_ambari_server_, "setup_logging")
   @patch.object(_ambari_server_, "init_logging")
   def test_main_test_status_not_running(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
-                                        logger_mock, optionParserMock, is_server_runing_method):
-    opm = optionParserMock.return_value
-    options = self._create_empty_options_mock()
-    del options.exit_message
+                                        logger_mock, is_server_runing_method):
 
-    args = ["status"]
-    opm.parse_args.return_value = (options, args)
+    import sys
+    tmp_argv = sys.argv
+    try:
+      sys.argv = ["ambari-server", "status"]
 
-    is_server_runing_method.return_value = (False, None)
+      is_server_runing_method.return_value = (False, None)
 
-    options.dbms = None
-    options.sid_or_sname = "sid"
+      try:
+        _ambari_server_.mainBody()
+      except SystemExit as e:
+        self.assertTrue(e.code == 3)
 
+      self.assertTrue(is_server_runing_method.called)
+      pass
+    finally:
+      sys.argv = tmp_argv
+
+  @not_for_platform(PLATFORM_WINDOWS)
+  @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
+  @patch.object(_ambari_server_, "logger")
+  @patch("ambari_server.serverConfiguration.get_ambari_properties")
+  @patch.object(_ambari_server_, "setup_logging")
+  @patch.object(_ambari_server_, "init_logging")
+  def test_status_extra_option(self, init_logging_mock, setup_logging_mock, get_ambari_properties_mock,
+                                        logger_mock):
+
+    import sys
+    tmp_argv = sys.argv
     try:
-      _ambari_server_.mainBody()
-    except SystemExit as e:
-      self.assertTrue(e.code == 3)
+      sys.argv = ["ambari-server", "status", "--skip-database-check"]
+      flag = False
+      try:
+        _ambari_server_.mainBody()
+      except SystemExit as e:
+        self.assertEquals(e.code, 2)
+        flag = True
 
-    self.assertTrue(is_server_runing_method.called)
-    pass
+      self.assertTrue(flag)
 
+      pass
+    finally:
+      sys.argv = tmp_argv
 
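The new test_status_extra_option relies on the parser rejecting an option that
is not valid for the given action, presumably via OptionParser.error(), which
exits the process with status 2; the flag variable proves the SystemExit
actually happened rather than being skipped. A self-contained sketch of that
exit-code behavior with a bare optparse parser (no options registered):

import optparse
import unittest

class UnknownOptionTest(unittest.TestCase):
    def test_unknown_option_exits_2(self):
        parser = optparse.OptionParser()
        with self.assertRaises(SystemExit) as ctx:
            # optparse reports an unrecognized option via error(), which
            # exits with status 2 after printing a usage message.
            parser.parse_args(["--skip-database-check"])
        self.assertEqual(ctx.exception.code, 2)

if __name__ == '__main__':
    unittest.main()
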
   def test_web_server_startup_timeout(self):
     from ambari_server.serverConfiguration import get_web_server_startup_timeout