Posted to commits@ambari.apache.org by jo...@apache.org on 2016/12/07 21:17:46 UTC

[01/41] ambari git commit: AMBARI-19074. Host-Ordered: Empty groups are returned for Downgrade (ncole)

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-18456 803f44ba5 -> 88c289252


AMBARI-19074. Host-Ordered: Empty groups are returned for Downgrade (ncole)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/50ead4d2
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/50ead4d2
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/50ead4d2

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 50ead4d237b0c03fee68192a5983044409e3e9db
Parents: 8fbc271
Author: Nate Cole <nc...@hortonworks.com>
Authored: Mon Dec 5 12:54:41 2016 -0500
Committer: Nate Cole <nc...@hortonworks.com>
Committed: Mon Dec 5 15:27:19 2016 -0500

----------------------------------------------------------------------
 .../ambari/server/state/stack/UpgradePack.java  | 19 ++++++++--
 .../ambari/server/state/UpgradeHelperTest.java  | 39 +++++++++++---------
 2 files changed, 36 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/50ead4d2/ambari-server/src/main/java/org/apache/ambari/server/state/stack/UpgradePack.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/UpgradePack.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/UpgradePack.java
index aa97fa3..76f1897 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/UpgradePack.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/UpgradePack.java
@@ -289,10 +289,15 @@ public class UpgradePack {
     if (direction.isUpgrade()) {
       list = groups;
     } else {
-      if (type == UpgradeType.ROLLING) {
-        list = getDowngradeGroupsForRolling();
-      } else if (type == UpgradeType.NON_ROLLING) {
-        list = getDowngradeGroupsForNonrolling();
+      switch (type) {
+        case NON_ROLLING:
+          list = getDowngradeGroupsForNonrolling();
+          break;
+        case HOST_ORDERED:
+        case ROLLING:
+        default:
+          list = getDowngradeGroupsForRolling();
+          break;
       }
     }
 
@@ -358,6 +363,12 @@ public class UpgradePack {
   private List<Grouping> getDowngradeGroupsForRolling() {
     List<Grouping> reverse = new ArrayList<Grouping>();
 
+    // !!! Testing exposed an issue when groups.size() == 1.  There is normally no
+    // precedent for a one-group upgrade pack, but account for it anyway.
+    if (groups.size() == 1) {
+      return groups;
+    }
+
     int idx = 0;
     int iter = 0;
     Iterator<Grouping> it = groups.iterator();
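
The size == 1 short-circuit above means a single-group pack returns its groups unchanged for a downgrade, just as for an upgrade. A minimal sketch of that expectation, assuming a hypothetical makeSingleGroupPack() helper that builds an UpgradePack containing exactly one Grouping (the helper is illustrative, not part of this patch):

    // A one-group pack should yield the same list for both directions,
    // thanks to the groups.size() == 1 short-circuit above.
    UpgradePack pack = makeSingleGroupPack();  // assumed helper
    List<Grouping> up = pack.getGroups(Direction.UPGRADE);
    List<Grouping> down = pack.getGroups(Direction.DOWNGRADE);
    assertEquals(up, down);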

http://git-wip-us.apache.org/repos/asf/ambari/blob/50ead4d2/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
index 6b4b85a..ea1f18a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
+import java.lang.reflect.Field;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
@@ -1921,25 +1922,27 @@ public class UpgradeHelperTest {
     ServiceComponentHost sch1 = sc.addServiceComponentHost("h1");
     ServiceComponentHost sch2 = sc.addServiceComponentHost("h2");
 
-    UpgradePack upgradePack = new UpgradePack() {
-      @Override
-      public List<Grouping> getGroups(Direction direction) {
-        HostOrderItem hostItem = new HostOrderItem(HostOrderActionType.HOST_UPGRADE,
-            Lists.newArrayList("h1", "h2"));
-        HostOrderItem checkItem = new HostOrderItem(HostOrderActionType.SERVICE_CHECK,
-            Lists.newArrayList("ZOOKEEPER", "STORM"));
-
-        Grouping g = new HostOrderGrouping();
-        ((HostOrderGrouping) g).setHostOrderItems(Lists.newArrayList(hostItem, checkItem));
-        g.title = "Some Title";
-        return Lists.newArrayList(g);
-      }
+    // !!! make a custom grouping
+    HostOrderItem hostItem = new HostOrderItem(HostOrderActionType.HOST_UPGRADE,
+        Lists.newArrayList("h1", "h2"));
+    HostOrderItem checkItem = new HostOrderItem(HostOrderActionType.SERVICE_CHECK,
+        Lists.newArrayList("ZOOKEEPER", "STORM"));
+
+    Grouping g = new HostOrderGrouping();
+    ((HostOrderGrouping) g).setHostOrderItems(Lists.newArrayList(hostItem, checkItem));
+    g.title = "Some Title";
+
+    UpgradePack upgradePack = new UpgradePack();
+
+    // !!! set the groups directly; allow the logic in getGroups(Direction) to happen
+    Field field = UpgradePack.class.getDeclaredField("groups");
+    field.setAccessible(true);
+    field.set(upgradePack, Lists.newArrayList(g));
+
+    field = UpgradePack.class.getDeclaredField("type");
+    field.setAccessible(true);
+    field.set(upgradePack, UpgradeType.HOST_ORDERED);
 
-      @Override
-      public Map<String, Map<String, ProcessingComponent>> getTasks() {
-        return new HashMap<>();
-      }
-    };
 
     MasterHostResolver resolver = new MasterHostResolver(m_configHelper, c);
     UpgradeContext context = new UpgradeContext(c, UpgradeType.HOST_ORDERED, Direction.UPGRADE, new HashMap<String, Object>());
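
The reflection calls above amount to a small test utility; a sketch of the same pattern factored into a helper (the helper itself is illustrative, not part of the patch):

    // Set a private field on an object under test without widening its
    // visibility; looks only at the declaring class, matching the usage above.
    static void setField(Object target, String name, Object value) throws Exception {
      Field field = target.getClass().getDeclaredField(name);
      field.setAccessible(true);
      field.set(target, value);
    }

    // e.g. setField(upgradePack, "groups", Lists.newArrayList(g));
    //      setField(upgradePack, "type", UpgradeType.HOST_ORDERED);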


[27/41] ambari git commit: AMBARI-18966. Add check to ensure we do not have @Transactional annotations on private methods. (Attila Doroszlai via stoader)

Posted by jo...@apache.org.
AMBARI-18966. Add check to ensure we do not have @Transactional annotations on private methods. (Attila Doroszlai via stoader)
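
Guice implements @Transactional through AOP method interception, which works by dynamically subclassing and overriding methods; private methods cannot be overridden, so the interceptor never fires and the method silently runs without a transaction. A minimal sketch of the failure mode the new check guards against (class and method names are illustrative):

    import com.google.inject.persist.Transactional;

    public class ExampleDao {

      // BROKEN: Guice AOP cannot override a private method, so no
      // transaction is ever opened around this call.
      @Transactional
      private void saveBroken() { /* ... */ }

      // OK: package-private (and public) methods can be intercepted;
      // relaxing visibility is exactly the fix applied in this commit.
      @Transactional
      void saveFixed() { /* ... */ }
    }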


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/304a5bf6
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/304a5bf6
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/304a5bf6

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 304a5bf6027efa9e70b1deaba325d54e586e4292
Parents: 6806edc
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Wed Dec 7 10:45:54 2016 +0100
Committer: Toader, Sebastian <st...@hortonworks.com>
Committed: Wed Dec 7 10:45:54 2016 +0100

----------------------------------------------------------------------
 ambari-project/pom.xml                          | 41 +++++++++++++++
 ambari-server/checkstyle.xml                    | 17 ++++++
 ambari-server/pom.xml                           |  4 ++
 .../internal/AlertTargetResourceProvider.java   |  2 +-
 .../ClusterStackVersionResourceProvider.java    |  4 +-
 .../internal/UpgradeResourceProvider.java       |  2 +-
 .../apache/ambari/server/orm/dao/AlertsDAO.java |  6 +--
 .../apache/ambari/server/state/ServiceImpl.java |  2 +-
 .../server/state/cluster/ClustersImpl.java      |  2 +-
 .../ambari/server/state/host/HostImpl.java      |  2 +-
 .../services/RetryUpgradeActionService.java     |  2 +-
 .../svccomphost/ServiceComponentHostImpl.java   |  2 +-
 .../server/upgrade/UpgradeCatalog240.java       |  4 +-
 pom.xml                                         |  1 +
 utility/pom.xml                                 | 33 +++++++++++-
 ...AvoidTransactionalOnPrivateMethodsCheck.java | 55 ++++++++++++++++++++
 .../src/main/resources/checkstyle_packages.xml  | 15 ++++++
 ...dTransactionalOnPrivateMethodsCheckTest.java | 49 +++++++++++++++++
 .../InputTransactionalOnPrivateMethods.java     | 46 ++++++++++++++++
 19 files changed, 274 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/304a5bf6/ambari-project/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-project/pom.xml b/ambari-project/pom.xml
index 7f273eb..b00be38 100644
--- a/ambari-project/pom.xml
+++ b/ambari-project/pom.xml
@@ -30,6 +30,8 @@
     <ambari.dir>${project.parent.basedir}</ambari.dir>
     <powermock.version>1.6.3</powermock.version>
     <jetty.version>8.1.19.v20160209</jetty.version>
+    <checkstyle.version>6.19</checkstyle.version> <!-- last version that does not require Java 8 -->
+    <checkstyle.skip>false</checkstyle.skip>
   </properties>
   <profiles>
     <profile>
@@ -477,6 +479,11 @@
         <artifactId>jline</artifactId>
         <version>2.11</version>
       </dependency>
+      <dependency>
+        <groupId>com.puppycrawl.tools</groupId>
+        <artifactId>checkstyle</artifactId>
+        <version>${checkstyle.version}</version>
+      </dependency>
     </dependencies>
   </dependencyManagement>
   <build>
@@ -486,6 +493,40 @@
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-surefire-plugin</artifactId>
         </plugin>
+        <plugin>
+          <groupId>org.apache.maven.plugins</groupId>
+          <artifactId>maven-checkstyle-plugin</artifactId>
+          <version>2.17</version>
+          <executions>
+            <execution>
+              <id>checkstyle</id>
+              <phase>test</phase>
+              <configuration>
+                <configLocation>${project.basedir}/checkstyle.xml</configLocation>
+                <encoding>UTF-8</encoding>
+                <consoleOutput>true</consoleOutput>
+                <failsOnError>true</failsOnError>
+                <linkXRef>false</linkXRef>
+                <skip>${checkstyle.skip}</skip>
+              </configuration>
+              <goals>
+                <goal>check</goal>
+              </goals>
+            </execution>
+          </executions>
+          <dependencies>
+            <dependency>
+              <groupId>com.puppycrawl.tools</groupId>
+              <artifactId>checkstyle</artifactId>
+              <version>${checkstyle.version}</version>
+            </dependency>
+            <dependency>
+              <groupId>utility</groupId>
+              <artifactId>utility</artifactId>
+              <version>1.0.0.0-SNAPSHOT</version>
+            </dependency>
+          </dependencies>
+        </plugin>
       </plugins>
     </pluginManagement>
     <plugins>

http://git-wip-us.apache.org/repos/asf/ambari/blob/304a5bf6/ambari-server/checkstyle.xml
----------------------------------------------------------------------
diff --git a/ambari-server/checkstyle.xml b/ambari-server/checkstyle.xml
new file mode 100644
index 0000000..81f6380
--- /dev/null
+++ b/ambari-server/checkstyle.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0"?>
+<!-- Licensed under the Apache License, Version 2.0 (the "License"); you
+  may not use this file except in compliance with the License. You may obtain
+  a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless
+  required by applicable law or agreed to in writing, software distributed
+  under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
+  OR CONDITIONS OF ANY KIND, either express or implied. See the License for
+  the specific language governing permissions and limitations under the License.
+  See accompanying LICENSE file. -->
+<!DOCTYPE module PUBLIC
+  "-//Puppy Crawl//DTD Check Configuration 1.3//EN"
+  "http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
+<module name="Checker">
+  <module name="TreeWalker">
+    <module name="AvoidTransactionalOnPrivateMethodsCheck"/>
+  </module>
+</module>

http://git-wip-us.apache.org/repos/asf/ambari/blob/304a5bf6/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index 69ab9d0..e8de84e 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -692,6 +692,10 @@
         <artifactId>jetty-maven-plugin</artifactId>
         <version>${jetty.version}</version>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-checkstyle-plugin</artifactId>
+      </plugin>
     </plugins>
     <resources>
       <resource>

http://git-wip-us.apache.org/repos/asf/ambari/blob/304a5bf6/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AlertTargetResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AlertTargetResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AlertTargetResourceProvider.java
index 8e5674e..4010528 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AlertTargetResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AlertTargetResourceProvider.java
@@ -374,7 +374,7 @@ public class AlertTargetResourceProvider extends
    */
   @Transactional
   @SuppressWarnings("unchecked")
-  private void updateAlertTargets(long alertTargetId,
+  void updateAlertTargets(long alertTargetId,
       Map<String, Object> requestMap)
       throws AmbariException {
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/304a5bf6/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index 42aae84..d417ec2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -442,7 +442,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   }
 
   @Transactional
-  private void createHostVersions(Cluster cluster, List<Host> hosts, StackId stackId,
+  void createHostVersions(Cluster cluster, List<Host> hosts, StackId stackId,
       String desiredRepoVersion, RepositoryVersionState repoState)
       throws AmbariException, SystemException {
     final String clusterName = cluster.getClusterName();
@@ -482,7 +482,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   }
 
   @Transactional
-  private RequestStageContainer createOrchestration(Cluster cluster, StackId stackId,
+  RequestStageContainer createOrchestration(Cluster cluster, StackId stackId,
       List<Host> hosts, RepositoryVersionEntity repoVersionEnt, Map<String, Object> propertyMap)
       throws AmbariException, SystemException {
     final AmbariManagementController managementController = getManagementController();

http://git-wip-us.apache.org/repos/asf/ambari/blob/304a5bf6/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 5931743..67be152 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -972,7 +972,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
    * @throws AmbariException
    */
   @Transactional
-  private UpgradeEntity createUpgradeInsideTransaction(Cluster cluster,
+  UpgradeEntity createUpgradeInsideTransaction(Cluster cluster,
       RequestStageContainer request,
       UpgradeEntity upgradeEntity) throws AmbariException {
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/304a5bf6/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java
index d187947..e8c4b5f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java
@@ -1492,7 +1492,7 @@ public class AlertsDAO implements Cleanable {
    * @return a long representing the number of affected (deleted) records
    */
   @Transactional
-  private int cleanAlertNoticesForClusterBeforeDate(Long clusterId, long beforeDateMillis) {
+  int cleanAlertNoticesForClusterBeforeDate(Long clusterId, long beforeDateMillis) {
     LOG.info("Deleting AlertNotice entities before date " + new Date(beforeDateMillis));
     EntityManager entityManager = m_entityManagerProvider.get();
     List<Integer> ids = findAllAlertHistoryIdsBeforeDate(clusterId, beforeDateMillis);
@@ -1523,7 +1523,7 @@ public class AlertsDAO implements Cleanable {
    * @return a long representing the number of affected (deleted) records
    */
   @Transactional
-  private int cleanAlertCurrentsForClusterBeforeDate(long clusterId, long beforeDateMillis) {
+  int cleanAlertCurrentsForClusterBeforeDate(long clusterId, long beforeDateMillis) {
     LOG.info("Deleting AlertCurrent entities before date " + new Date(beforeDateMillis));
     EntityManager entityManager = m_entityManagerProvider.get();
     List<Integer> ids = findAllAlertHistoryIdsBeforeDate(clusterId, beforeDateMillis);
@@ -1553,7 +1553,7 @@ public class AlertsDAO implements Cleanable {
    */
 
   @Transactional
-  private int cleanAlertHistoriesForClusterBeforeDate(Long clusterId, long beforeDateMillis) {
+  int cleanAlertHistoriesForClusterBeforeDate(Long clusterId, long beforeDateMillis) {
     return executeQuery("AlertHistoryEntity.removeInClusterBeforeDate", AlertHistoryEntity.class, clusterId, beforeDateMillis);
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/304a5bf6/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
index f87b99c..e223eed 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
@@ -451,7 +451,7 @@ public class ServiceImpl implements Service {
   }
 
   @Transactional
-  private void persistEntities(ClusterServiceEntity serviceEntity) {
+  void persistEntities(ClusterServiceEntity serviceEntity) {
     long clusterId = cluster.getClusterId();
     ClusterEntity clusterEntity = clusterDAO.findById(clusterId);
     serviceEntity.setClusterEntity(clusterEntity);

http://git-wip-us.apache.org/repos/asf/ambari/blob/304a5bf6/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
index 8e5f37e..1de10f9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
@@ -183,7 +183,7 @@ public class ClustersImpl implements Clusters {
    */
   @Inject
   @Transactional
-  private void loadClustersAndHosts() {
+  void loadClustersAndHosts() {
     List<HostEntity> hostEntities = hostDAO.findAll();
     for (HostEntity hostEntity : hostEntities) {
       Host host = hostFactory.create(hostEntity);

http://git-wip-us.apache.org/repos/asf/ambari/blob/304a5bf6/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
index dd5e635..a444f5d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
@@ -954,7 +954,7 @@ public class HostImpl implements Host {
   }
 
   @Transactional
-  private void persistEntities(HostEntity hostEntity) {
+  void persistEntities(HostEntity hostEntity) {
     hostDAO.create(hostEntity);
     if (!hostEntity.getClusterEntities().isEmpty()) {
       for (ClusterEntity clusterEntity : hostEntity.getClusterEntities()) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/304a5bf6/ambari-server/src/main/java/org/apache/ambari/server/state/services/RetryUpgradeActionService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/services/RetryUpgradeActionService.java b/ambari-server/src/main/java/org/apache/ambari/server/state/services/RetryUpgradeActionService.java
index 1ea5558..a92aa04 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/services/RetryUpgradeActionService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/services/RetryUpgradeActionService.java
@@ -190,7 +190,7 @@ public class RetryUpgradeActionService extends AbstractScheduledService {
    * @param requestId Request Id to search tasks for.
    */
   @Transactional
-  private void retryHoldingCommandsInRequest(Long requestId) {
+  void retryHoldingCommandsInRequest(Long requestId) {
     if (requestId == null) {
       return;
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/304a5bf6/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
index c1655aa..5225598 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
@@ -1313,7 +1313,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
   }
 
   @Transactional
-  private void persistEntities(HostEntity hostEntity, HostComponentStateEntity stateEntity,
+  void persistEntities(HostEntity hostEntity, HostComponentStateEntity stateEntity,
       HostComponentDesiredStateEntity desiredStateEntity) {
     ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName(
         serviceComponent.getClusterId(), serviceComponent.getServiceName(),

http://git-wip-us.apache.org/repos/asf/ambari/blob/304a5bf6/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
index c113fff..b7e248a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
@@ -1563,7 +1563,7 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
    * @throws SQLException
    */
   @Transactional
-  private void updateServiceComponentDesiredStateTableDDL() throws SQLException {
+  void updateServiceComponentDesiredStateTableDDL() throws SQLException {
     if (dbAccessor.tableHasPrimaryKey(SERVICE_COMPONENT_DS_TABLE, ID)) {
       LOG.info("Skipping {} table Primary Key modifications since the new {} column already exists",
           SERVICE_COMPONENT_DS_TABLE, ID);
@@ -2785,7 +2785,7 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
    *  instead of cluster_name
    */
   @Transactional
-  private void updateViewInstanceTable() throws SQLException {
+  void updateViewInstanceTable() throws SQLException {
     try {
       if (Long.class.equals(dbAccessor.getColumnClass(VIEWINSTANCE_TABLE, CLUSTER_HANDLE_COLUMN))) {
         LOG.info(String.format("%s column is already numeric. Skipping an update of %s table.", CLUSTER_HANDLE_COLUMN, VIEWINSTANCE_TABLE));

http://git-wip-us.apache.org/repos/asf/ambari/blob/304a5bf6/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index bd0aa7b..aeb1467 100644
--- a/pom.xml
+++ b/pom.xml
@@ -266,6 +266,7 @@
             <exclude>contrib/agent-simulator/docker_image/package_list.txt</exclude>
             <exclude>contrib/agent-simulator/config/cluster.txt</exclude>
             <exclude>version</exclude>
+            <exclude>**/target/surefire-reports/</exclude>
             <!--IDE and GIT files-->
             <exclude>**/.idea/</exclude>
             <exclude>**/.classpath/</exclude>

http://git-wip-us.apache.org/repos/asf/ambari/blob/304a5bf6/utility/pom.xml
----------------------------------------------------------------------
diff --git a/utility/pom.xml b/utility/pom.xml
index 2febb83..6f60206 100644
--- a/utility/pom.xml
+++ b/utility/pom.xml
@@ -20,6 +20,13 @@
          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
 
+  <parent>
+    <groupId>org.apache.ambari</groupId>
+    <artifactId>ambari-project</artifactId>
+    <version>2.0.0.0-SNAPSHOT</version>
+    <relativePath>../ambari-project</relativePath>
+  </parent>
+
   <artifactId>utility</artifactId>
   <groupId>utility</groupId>
   <version>1.0.0.0-SNAPSHOT</version>
@@ -28,9 +35,25 @@
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
-      <version>4.12</version>
       <scope>compile</scope>    <!-- has to be compile-time dependency on junit -->
     </dependency>
+    <dependency>
+      <groupId>com.puppycrawl.tools</groupId>
+      <artifactId>checkstyle</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.puppycrawl.tools</groupId>
+      <artifactId>checkstyle</artifactId>
+      <type>test-jar</type>
+      <version>${checkstyle.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <version>19.0</version> <!-- required for checkstyle -->
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <build>
@@ -45,6 +68,14 @@
         </configuration>
       </plugin>
       <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <descriptors>
+            <descriptor>${project.parent.basedir}/src/main/assemblies/empty.xml</descriptor>
+          </descriptors>
+        </configuration>
+      </plugin>
+      <plugin>
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>rpm-maven-plugin</artifactId>
         <version>2.0.1</version>

http://git-wip-us.apache.org/repos/asf/ambari/blob/304a5bf6/utility/src/main/java/org/apache/ambari/checkstyle/AvoidTransactionalOnPrivateMethodsCheck.java
----------------------------------------------------------------------
diff --git a/utility/src/main/java/org/apache/ambari/checkstyle/AvoidTransactionalOnPrivateMethodsCheck.java b/utility/src/main/java/org/apache/ambari/checkstyle/AvoidTransactionalOnPrivateMethodsCheck.java
new file mode 100644
index 0000000..80da103
--- /dev/null
+++ b/utility/src/main/java/org/apache/ambari/checkstyle/AvoidTransactionalOnPrivateMethodsCheck.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.checkstyle;
+
+import com.puppycrawl.tools.checkstyle.api.AbstractCheck;
+import com.puppycrawl.tools.checkstyle.api.DetailAST;
+import com.puppycrawl.tools.checkstyle.api.TokenTypes;
+
+/**
+ * Detects private methods annotated as <code>Transactional</code>.
+ * See https://github.com/google/guice/wiki/Transactions for why this should be
+ * avoided.
+ */
+public class AvoidTransactionalOnPrivateMethodsCheck extends AbstractCheck {
+
+  private static final String ANNOTATION_NAME = "Transactional";
+  public static final String MSG_TRANSACTIONAL_ON_PRIVATE_METHOD = "@" + ANNOTATION_NAME + " should not be used on private methods";
+
+  @Override
+  public int[] getDefaultTokens() {
+    return new int[] { TokenTypes.METHOD_DEF };
+  }
+
+  @Override
+  public void visitToken(DetailAST ast) {
+    DetailAST modifiers = ast.findFirstToken(TokenTypes.MODIFIERS);
+    if (modifiers.findFirstToken(TokenTypes.LITERAL_PRIVATE) != null) {
+      DetailAST annotation = modifiers.findFirstToken(TokenTypes.ANNOTATION);
+      while (annotation != null) {
+        DetailAST name = annotation.findFirstToken(TokenTypes.IDENT);
+        if (name != null && ANNOTATION_NAME.equals(name.getText())) {
+          log(ast.getLineNo(), MSG_TRANSACTIONAL_ON_PRIVATE_METHOD);
+          break;
+        }
+        annotation = annotation.getNextSibling();
+      }
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/304a5bf6/utility/src/main/resources/checkstyle_packages.xml
----------------------------------------------------------------------
diff --git a/utility/src/main/resources/checkstyle_packages.xml b/utility/src/main/resources/checkstyle_packages.xml
new file mode 100644
index 0000000..8cf4a44
--- /dev/null
+++ b/utility/src/main/resources/checkstyle_packages.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Licensed under the Apache License, Version 2.0 (the "License"); you
+  may not use this file except in compliance with the License. You may obtain
+  a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless
+  required by applicable law or agreed to in writing, software distributed
+  under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
+  OR CONDITIONS OF ANY KIND, either express or implied. See the License for
+  the specific language governing permissions and limitations under the License.
+  See accompanying LICENSE file. -->
+<!DOCTYPE checkstyle-packages PUBLIC
+  "-//Puppy Crawl//DTD Package Names 1.0//EN"
+  "http://www.puppycrawl.com/dtds/packages_1_0.dtd">
+<checkstyle-packages>
+  <package name="org.apache.ambari.checkstyle"/>
+</checkstyle-packages>

http://git-wip-us.apache.org/repos/asf/ambari/blob/304a5bf6/utility/src/test/java/org/apache/ambari/checkstyle/AvoidTransactionalOnPrivateMethodsCheckTest.java
----------------------------------------------------------------------
diff --git a/utility/src/test/java/org/apache/ambari/checkstyle/AvoidTransactionalOnPrivateMethodsCheckTest.java b/utility/src/test/java/org/apache/ambari/checkstyle/AvoidTransactionalOnPrivateMethodsCheckTest.java
new file mode 100644
index 0000000..14d7486
--- /dev/null
+++ b/utility/src/test/java/org/apache/ambari/checkstyle/AvoidTransactionalOnPrivateMethodsCheckTest.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.checkstyle;
+
+import static org.apache.ambari.checkstyle.AvoidTransactionalOnPrivateMethodsCheck.MSG_TRANSACTIONAL_ON_PRIVATE_METHOD;
+
+import java.io.File;
+import java.io.IOException;
+
+import com.puppycrawl.tools.checkstyle.BaseCheckTestSupport;
+import com.puppycrawl.tools.checkstyle.DefaultConfiguration;
+
+import org.junit.Test;
+
+public class AvoidTransactionalOnPrivateMethodsCheckTest extends BaseCheckTestSupport {
+
+  @Override
+  protected String getPath(String filename) throws IOException {
+    return new File("src/test/resources/org/apache/ambari/checkstyle/" + filename)
+      .getCanonicalPath();
+  }
+
+  @Test
+  public void transactionalOnPrivateMethod() throws Exception {
+    final DefaultConfiguration config = createCheckConfig(AvoidTransactionalOnPrivateMethodsCheck.class);
+    final String[] expected = {
+      "32: " + MSG_TRANSACTIONAL_ON_PRIVATE_METHOD,
+      "41: " + MSG_TRANSACTIONAL_ON_PRIVATE_METHOD,
+    };
+
+    verify(config, getPath("InputTransactionalOnPrivateMethods.java"), expected);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/304a5bf6/utility/src/test/resources/org/apache/ambari/checkstyle/InputTransactionalOnPrivateMethods.java
----------------------------------------------------------------------
diff --git a/utility/src/test/resources/org/apache/ambari/checkstyle/InputTransactionalOnPrivateMethods.java b/utility/src/test/resources/org/apache/ambari/checkstyle/InputTransactionalOnPrivateMethods.java
new file mode 100644
index 0000000..2ea5444
--- /dev/null
+++ b/utility/src/test/resources/org/apache/ambari/checkstyle/InputTransactionalOnPrivateMethods.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.checkstyle;
+
+import com.google.inject.persist.Transactional;
+
+/**
+ * Input file for AvoidTransactionalOnPrivateMethodsCheckTest.
+ */
+public class InputTransactionalOnPrivateMethods {
+
+  @Transactional
+  public void publicMethodWithTransactional() {
+    ;
+  }
+
+  @Transactional
+  private void privateMethodWithTransactional() {
+    ;
+  }
+
+  private void privateMethodWithoutTransactional() {
+    ;
+  }
+
+  @Transactional
+  private void otherPrivateMethodWithTransactional() {
+    ;
+  }
+
+}


[07/41] ambari git commit: AMBARI-18981 - Manage JournalNodes Wizard: Display only the steps needed for DELETE ONLY task (rzang)

Posted by jo...@apache.org.
AMBARI-18981 - Manage JournalNodes Wizard: Display only the steps needed for DELETE ONLY task (rzang)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/353b2ac0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/353b2ac0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/353b2ac0

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 353b2ac0064e0d014ace50acba12c523491782a8
Parents: e88f846
Author: Richard Zang <rz...@apache.org>
Authored: Mon Dec 5 18:11:12 2016 -0800
Committer: Richard Zang <rz...@apache.org>
Committed: Mon Dec 5 18:11:12 2016 -0800

----------------------------------------------------------------------
 .../app/controllers/global/update_controller.js |  5 ++--
 .../journalNode/wizard_controller.js            | 28 +++++++++++++-------
 .../app/routes/manage_journalnode_routes.js     |  4 +--
 .../highAvailability/journalNode/wizard.hbs     | 13 +++++----
 .../highAvailability/journalNode/step2_view.js  |  5 ++--
 5 files changed, 30 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/353b2ac0/ambari-web/app/controllers/global/update_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/global/update_controller.js b/ambari-web/app/controllers/global/update_controller.js
index 4631f09..ce93b34 100644
--- a/ambari-web/app/controllers/global/update_controller.js
+++ b/ambari-web/app/controllers/global/update_controller.js
@@ -249,11 +249,10 @@ App.UpdateController = Em.Controller.extend({
         ]);
       }
       else {
-        // clusterController.isHostsLoaded may be changed in callback, that is why it's value is cached before calling callback
-        isHostsLoaded = App.router.get('clusterController.isHostsLoaded');
-        callback();
         // On pages other than hosts/hostDetails, make sure hostsMapper loads only once per page load; no repeated updates are needed, but it must load at least once
+        isHostsLoaded = App.router.get('clusterController.isHostsLoaded');
         if (isHostsLoaded) {
+          callback();
           return;
         }
       }

http://git-wip-us.apache.org/repos/asf/ambari/blob/353b2ac0/ambari-web/app/controllers/main/admin/highAvailability/journalNode/wizard_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/highAvailability/journalNode/wizard_controller.js b/ambari-web/app/controllers/main/admin/highAvailability/journalNode/wizard_controller.js
index 2305524..fe9a15b 100644
--- a/ambari-web/app/controllers/main/admin/highAvailability/journalNode/wizard_controller.js
+++ b/ambari-web/app/controllers/main/admin/highAvailability/journalNode/wizard_controller.js
@@ -120,22 +120,30 @@ App.ManageJournalNodeWizardController = App.WizardController.extend({
   },
 
   getJournalNodesToAdd: function () {
-    return this.get('content.masterComponentHosts').filterProperty('component', 'JOURNALNODE')
-      .filterProperty('isInstalled', false).mapProperty('hostName');
+    var result = [];
+    var masterComponentHosts = this.get('content.masterComponentHosts');
+    if (masterComponentHosts) {
+      result = masterComponentHosts.filterProperty('component', 'JOURNALNODE').filterProperty('isInstalled', false).mapProperty('hostName');
+    }
+    return result;
   },
 
   getJournalNodesToDelete: function () {
-    var existingHosts = App.HostComponent.find().filterProperty('componentName', 'JOURNALNODE').mapProperty('hostName');
-    var currentJNs = this.get('content.masterComponentHosts').filterProperty('component', 'JOURNALNODE');
-    var removed = existingHosts.filter(function(host) {
-      return currentJNs.filterProperty('hostName', host).length == 0;
-    });
-    return removed;
+    var result = [];
+    var masterComponentHosts = this.get('content.masterComponentHosts');
+    if (masterComponentHosts) {
+      var currentJNs = masterComponentHosts.filterProperty('component', 'JOURNALNODE');
+      var existingHosts = App.HostComponent.find().filterProperty('componentName', 'JOURNALNODE').mapProperty('hostName');
+      result = existingHosts.filter(function(host) {
+        return currentJNs.filterProperty('hostName', host).length == 0;
+      });
+    }
+    return result;
   },
 
   isDeleteOnly: function () {
-    return this.getJournalNodesToAdd().length == 0 && this.getJournalNodesToDelete().length > 0;
-  },
+    return this.get('currentStep') > 1 && this.getJournalNodesToAdd().length == 0 && this.getJournalNodesToDelete().length > 0;
+  }.property('content.masterComponentHosts', 'App.router.clusterController.isHostsLoaded', 'currentStep'),
 
   /**
    * Save config properties

http://git-wip-us.apache.org/repos/asf/ambari/blob/353b2ac0/ambari-web/app/routes/manage_journalnode_routes.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/routes/manage_journalnode_routes.js b/ambari-web/app/routes/manage_journalnode_routes.js
index e13a338..90b45b0 100644
--- a/ambari-web/app/routes/manage_journalnode_routes.js
+++ b/ambari-web/app/routes/manage_journalnode_routes.js
@@ -118,7 +118,7 @@ module.exports = App.WizardRoute.extend({
       controller.saveConfigTag(stepController.get("hdfsSiteTag"));
       controller.saveNameServiceId(stepController.get('content.nameServiceId'));
       App.set('router.nextBtnClickInProgress', false);
-      if (controller.isDeleteOnly()) {
+      if (controller.get('isDeleteOnly')) {
         router.transitionTo('step4');
       } else {
         router.transitionTo('step3');
@@ -167,7 +167,7 @@ module.exports = App.WizardRoute.extend({
     next: function (router) {
       var controller = router.get('manageJournalNodeWizardController');
       controller.clearTasksData();
-      if (controller.isDeleteOnly()) {
+      if (controller.get('isDeleteOnly')) {
         router.transitionTo('step8');
       } else {
         router.transitionTo('step5');

http://git-wip-us.apache.org/repos/asf/ambari/blob/353b2ac0/ambari-web/app/templates/main/admin/highAvailability/journalNode/wizard.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/highAvailability/journalNode/wizard.hbs b/ambari-web/app/templates/main/admin/highAvailability/journalNode/wizard.hbs
index 844e1c8..a4c2155 100644
--- a/ambari-web/app/templates/main/admin/highAvailability/journalNode/wizard.hbs
+++ b/ambari-web/app/templates/main/admin/highAvailability/journalNode/wizard.hbs
@@ -22,13 +22,12 @@
             <ul class="nav nav-pills nav-stacked">
                 <li {{bindAttr class="isStep1:active view.isStep1Disabled:disabled view.isStep1Completed:completed"}}><a href="javascript:void(null);"  {{action gotoStep1 target="controller"}}><i class="step-marker"><span class="step-index">1</span></i><p class="step-name">{{t admin.manageJournalNode.wizard.step1.header}}</p></a></li>
                 <li {{bindAttr class="isStep2:active view.isStep2Disabled:disabled view.isStep2Completed:completed"}}><a href="javascript:void(null);"  {{action gotoStep2 target="controller"}}><i class="step-marker"><span class="step-index">2</span></i><p class="step-name">{{t admin.manageJournalNode.wizard.step2.header}}</p></a></li>
-                <li {{bindAttr class="isStep3:active view.isStep3Disabled:disabled view.isStep3Completed:completed"}}><a href="javascript:void(null);"  {{action gotoStep3 target="controller"}}><i class="step-marker"><span class="step-index">3</span></i><p class="step-name">{{t admin.manageJournalNode.wizard.step3.header}}</p></a></li>
-                <li {{bindAttr class="isStep4:active view.isStep4Disabled:disabled view.isStep4Completed:completed"}}><a href="javascript:void(null);"  {{action gotoStep4 target="controller"}}><i class="step-marker"><span class="step-index">4</span></i><p class="step-name">{{t admin.manageJournalNode.wizard.step4.header}}</p></a></li>
-                <li {{bindAttr class="isStep5:active view.isStep5Disabled:disabled view.isStep5Completed:completed"}}><a href="javascript:void(null);"  {{action gotoStep5 target="controller"}}><i class="step-marker"><span class="step-index">5</span></i><p class="step-name">{{t admin.manageJournalNode.wizard.step5.header}}</p></a></li>
-                <li {{bindAttr class="isStep6:active view.isStep6Disabled:disabled view.isStep6Completed:completed"}}><a href="javascript:void(null);"  {{action gotoStep6 target="controller"}}><i class="step-marker"><span class="step-index">6</span></i><p class="step-name">{{t admin.manageJournalNode.wizard.step6.header}}</p></a></li>
-                <li {{bindAttr class="isStep7:active view.isStep7Disabled:disabled view.isStep7Completed:completed"}}><a href="javascript:void(null);"  {{action gotoStep5 target="controller"}}><i class="step-marker"><span class="step-index">7</span></i><p class="step-name">{{t admin.manageJournalNode.wizard.step7.header}}</p></a></li>
-                <li {{bindAttr class="isStep8:active view.isStep8Disabled:disabled view.isStep8Completed:completed"}}><a href="javascript:void(null);"  {{action gotoStep6 target="controller"}}><i class="step-marker"><span class="step-index">8</span></i><p class="step-name">{{t admin.manageJournalNode.wizard.step8.header}}</p></a></li>
-
+                <li {{bindAttr class="isStep3:active view.isStep3Disabled:disabled view.isStep3Completed:completed isDeleteOnly:hidden"}}><a href="javascript:void(null);"  {{action gotoStep3 target="controller"}}><i class="step-marker"><span class="step-index">3</span></i><p class="step-name">{{t admin.manageJournalNode.wizard.step3.header}}</p></a></li>
+                <li {{bindAttr class="isStep4:active view.isStep4Disabled:disabled view.isStep4Completed:completed"}}><a href="javascript:void(null);"  {{action gotoStep4 target="controller"}}><i class="step-marker"><span class="step-index">{{#if isDeleteOnly}}3{{else}}4{{/if}}</span></i><p class="step-name">{{t admin.manageJournalNode.wizard.step4.header}}</p></a></li>
+                <li {{bindAttr class="isStep5:active view.isStep5Disabled:disabled view.isStep5Completed:completed isDeleteOnly:hidden"}}><a href="javascript:void(null);"  {{action gotoStep5 target="controller"}}><i class="step-marker"><span class="step-index">5</span></i><p class="step-name">{{t admin.manageJournalNode.wizard.step5.header}}</p></a></li>
+                <li {{bindAttr class="isStep6:active view.isStep6Disabled:disabled view.isStep6Completed:completed isDeleteOnly:hidden"}}><a href="javascript:void(null);"  {{action gotoStep6 target="controller"}}><i class="step-marker"><span class="step-index">6</span></i><p class="step-name">{{t admin.manageJournalNode.wizard.step6.header}}</p></a></li>
+                <li {{bindAttr class="isStep7:active view.isStep7Disabled:disabled view.isStep7Completed:completed isDeleteOnly:hidden"}}><a href="javascript:void(null);"  {{action gotoStep5 target="controller"}}><i class="step-marker"><span class="step-index">7</span></i><p class="step-name">{{t admin.manageJournalNode.wizard.step7.header}}</p></a></li>
+                <li {{bindAttr class="isStep8:active view.isStep8Disabled:disabled view.isStep8Completed:completed"}}><a href="javascript:void(null);"  {{action gotoStep6 target="controller"}}><i class="step-marker"><span class="step-index">{{#if isDeleteOnly}}4{{else}}8{{/if}}</span></i><p class="step-name">{{t admin.manageJournalNode.wizard.step8.header}}</p></a></li>
             </ul>
         </div>
         {{outlet}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/353b2ac0/ambari-web/app/views/main/admin/highAvailability/journalNode/step2_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/highAvailability/journalNode/step2_view.js b/ambari-web/app/views/main/admin/highAvailability/journalNode/step2_view.js
index 656c487..680daa5 100644
--- a/ambari-web/app/views/main/admin/highAvailability/journalNode/step2_view.js
+++ b/ambari-web/app/views/main/admin/highAvailability/journalNode/step2_view.js
@@ -22,18 +22,17 @@ var App = require('app');
 App.ManageJournalNodeWizardStep2View = Em.View.extend({
 
   templateName: require('templates/main/admin/highAvailability/journalNode/step2'),
-  aaa: '',
   didInsertElement: function () {
     this.get('controller').loadStep();
   },
 
   journalNodesToAdd: function () {
     return App.router.get('manageJournalNodeWizardController').getJournalNodesToAdd();
-  }.property(),
+  }.property('App.router.clusterController.isHostsLoaded'),
 
   journalNodesToDelete: function () {
     return App.router.get('manageJournalNodeWizardController').getJournalNodesToDelete();
-  }.property(),
+  }.property('App.router.clusterController.isHostsLoaded'),
 
   isBackButtonVisible: false
 });


[20/41] ambari git commit: AMBARI-19093. HDP 3.0 support for HDFS with configs, kerberos, widgets, metrics, quicklinks, and themes (alejandro)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a710b35/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..30c49c7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/metainfo.xml
@@ -0,0 +1,190 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <displayName>HDFS</displayName>
+      <comment>Apache Hadoop Distributed File System</comment>
+      <version>2.7.1.3.0</version>
+      <extends>common-services/HDFS/2.1.0.2.0</extends>
+
+      <components>
+        <!-- NFS Gateway was added in HDP 2.3. -->
+        <component>
+          <name>NFS_GATEWAY</name>
+          <displayName>NFSGateway</displayName>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/nfsgateway.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+
+        <component>
+          <name>ZKFC</name>
+          <versionAdvertised>true</versionAdvertised>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>rpcbind</name>
+              <condition>should_install_rpcbind</condition>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat7,amazon2015,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>hadoop_${stack_version}</name>
+            </package>
+            <package>
+              <name>hadoop_${stack_version}-client</name>
+            </package>
+            <package>
+              <name>snappy</name>
+            </package>
+            <package>
+              <name>snappy-devel</name>
+            </package>
+            <package>
+              <name>lzo</name>
+              <skipUpgrade>true</skipUpgrade>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>hadooplzo_${stack_version}</name>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>hadooplzo_${stack_version}-native</name>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>hadoop_${stack_version}-libhdfs</name>
+            </package>
+          </packages>
+        </osSpecific>
+
+        <osSpecific>
+          <osFamily>suse12</osFamily>
+          <packages>
+            <package>
+              <name>hadoop_${stack_version}</name>
+            </package>
+            <package>
+              <name>hadoop_${stack_version}-client</name>
+            </package>
+            <package>
+              <name>snappy</name>
+            </package>
+            <package>
+              <name>snappy-devel</name>
+            </package>
+            <package>
+              <name>liblzo2-2</name>
+              <skipUpgrade>true</skipUpgrade>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>hadooplzo_${stack_version}</name>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>hadooplzo_${stack_version}-native</name>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>hadoop_${stack_version}-libhdfs</name>
+            </package>
+          </packages>
+        </osSpecific>
+
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-${stack_version}-client</name>
+            </package>
+            <package>
+              <name>hadoop-${stack_version}-hdfs-datanode</name>
+            </package>
+            <package>
+              <name>hadoop-${stack_version}-hdfs-journalnode</name>
+            </package>
+            <package>
+              <name>hadoop-${stack_version}-hdfs-namenode</name>
+            </package>
+            <package>
+              <name>hadoop-${stack_version}-hdfs-secondarynamenode</name>
+            </package>
+            <package>
+              <name>hadoop-${stack_version}-hdfs-zkfc</name>
+            </package>
+            <package>
+              <name>libsnappy1</name>
+            </package>
+            <package>
+              <name>libsnappy-dev</name>
+            </package>
+            <package>
+              <name>hadooplzo-${stack_version}</name>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>libhdfs0-${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+    </service>
+  </services>
+</metainfo>
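
Note: the osSpecific blocks above gate the LZO packages on the should_install_lzo condition and flag the distro-provided lzo/liblzo2-2 packages with skipUpgrade. A minimal sketch of how such a package list could be filtered, with a hypothetical Pkg model standing in for Ambari's parsed <package> element (not the real metainfo classes):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    public class PackageFilterDemo {
        // Hypothetical stand-in for a parsed <package> element.
        static class Pkg {
            final String name;
            final String condition;      // e.g. "should_install_lzo"; null when unconditional
            final boolean skipUpgrade;   // true for distro-provided packages such as lzo
            Pkg(String name, String condition, boolean skipUpgrade) {
                this.name = name; this.condition = condition; this.skipUpgrade = skipUpgrade;
            }
        }

        static List<Pkg> resolve(List<Pkg> all, Map<String, Boolean> conditions, boolean upgrading) {
            List<Pkg> kept = new ArrayList<>();
            for (Pkg p : all) {
                if (upgrading && p.skipUpgrade) continue;   // skipUpgrade wins during an upgrade
                if (p.condition != null && !conditions.getOrDefault(p.condition, false)) continue;
                kept.add(p);
            }
            return kept;
        }

        public static void main(String[] args) {
            List<Pkg> pkgs = List.of(
                new Pkg("snappy", null, false),
                new Pkg("lzo", "should_install_lzo", true),
                new Pkg("hadooplzo_${stack_version}", "should_install_lzo", false));
            // With LZO enabled and an upgrade in progress: snappy and hadooplzo survive, lzo is skipped.
            for (Pkg p : resolve(pkgs, Map.of("should_install_lzo", true), true)) {
                System.out.println(p.name);
            }
        }
    }

Marking only the distro packages skipUpgrade lets the versioned hadooplzo_${stack_version} packages move with the stack while the base OS packages stay put.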

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a710b35/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/quicklinks/quicklinks.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/quicklinks/quicklinks.json
new file mode 100644
index 0000000..5318ba0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/quicklinks/quicklinks.json
@@ -0,0 +1,80 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"dfs.http.policy",
+          "desired":"HTTPS_ONLY",
+          "site":"hdfs-site"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "namenode_ui",
+        "label": "NameNode UI",
+        "component_name": "NAMENODE",
+        "url":"%@://%@:%@",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "dfs.namenode.http-address",
+          "http_default_port": "50070",
+          "https_property": "dfs.namenode.https-address",
+          "https_default_port": "50470",
+          "regex": "\\w*:(\\d+)",
+          "site": "hdfs-site"
+        }
+      },
+      {
+        "name": "namenode_logs",
+        "label": "NameNode Logs",
+        "component_name": "NAMENODE",
+        "url":"%@://%@:%@/logs",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "dfs.namenode.http-address",
+          "http_default_port": "50070",
+          "https_property": "dfs.namenode.https-address",
+          "https_default_port": "50470",
+          "regex": "\\w*:(\\d+)",
+          "site": "hdfs-site"
+        }
+      },
+      {
+        "name": "namenode_jmx",
+        "label": "NameNode JMX",
+        "component_name": "NAMENODE",
+        "url":"%@://%@:%@/jmx",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "dfs.namenode.http-address",
+          "http_default_port": "50070",
+          "https_property": "dfs.namenode.https-address",
+          "https_default_port": "50470",
+          "regex": "\\w*:(\\d+)",
+          "site": "hdfs-site"
+        }
+      },
+      {
+        "name": "Thread Stacks",
+        "label": "Thread Stacks",
+        "component_name": "NAMENODE",
+        "url":"%@://%@:%@/stacks",
+        "requires_user_name": "false",
+        "port":{
+          "http_property": "dfs.namenode.http-address",
+          "http_default_port": "50070",
+          "https_property": "dfs.namenode.https-address",
+          "https_default_port": "50470",
+          "regex": "\\w*:(\\d+)",
+          "site": "hdfs-site"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
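
Note: each link above resolves its %@://%@:%@ template in three steps: the protocol check on dfs.http.policy selects https when the value is HTTPS_ONLY, the matching *-address property is read from hdfs-site, and the port is pulled out with the regex \w*:(\d+), falling back to the declared default port. A minimal, self-contained sketch of that resolution, assuming plain java.util.regex and an in-memory map in place of the real hdfs-site lookup:

    import java.util.Map;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class QuickLinkResolver {
        public static void main(String[] args) {
            // Hypothetical hdfs-site values; Ambari reads these from the cluster config.
            Map<String, String> hdfsSite = Map.of(
                "dfs.http.policy", "HTTPS_ONLY",
                "dfs.namenode.https-address", "nn1.example.com:50470");

            boolean https = "HTTPS_ONLY".equals(hdfsSite.get("dfs.http.policy"));
            String protocol    = https ? "https" : "http";
            String addressProp = https ? "dfs.namenode.https-address" : "dfs.namenode.http-address";
            String port        = https ? "50470" : "50070";   // declared default ports

            String address = hdfsSite.get(addressProp);
            if (address != null) {
                Matcher m = Pattern.compile("\\w*:(\\d+)").matcher(address);  // regex from quicklinks.json
                if (m.find()) {
                    port = m.group(1);
                }
            }
            String host = "nn1.example.com";                  // normally the NAMENODE component host
            System.out.println(protocol + "://" + host + ":" + port);  // -> https://nn1.example.com:50470
        }
    }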

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a710b35/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/themes/theme.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/themes/theme.json
new file mode 100644
index 0000000..6f2b797
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/themes/theme.json
@@ -0,0 +1,179 @@
+{
+  "name": "default",
+  "description": "Default theme for HDFS service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "2",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-namenode",
+                  "display-name": "NameNode",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-namenode-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-datanode",
+                  "display-name": "DataNode",
+                  "row-index": "0",
+                  "column-index": "1",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-datanode-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "hdfs-site/dfs.namenode.name.dir",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hadoop-env/namenode_heapsize",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.namenode.handler.count",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.data.dir",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hadoop-env/dtnode_heapsize",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.max.transfer.threads",
+          "subsection-name": "subsection-datanode-col1"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "hdfs-site/dfs.namenode.name.dir",
+        "widget": {
+          "type": "directories"
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.namenode.handler.count",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hadoop-env/namenode_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.data.dir",
+        "widget": {
+          "type": "directories"
+        }
+      },
+      {
+        "config": "hadoop-env/dtnode_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.max.transfer.threads",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
+
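
Note: the widgets array in this theme pairs each config key with a widget type (directories or slider) plus an optional unit. A quick sketch that walks the file with Jackson, assuming the theme.json above has been saved locally; this is only an illustration, not Ambari's actual theme loader:

    import java.io.File;
    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class ThemeWidgetDump {
        public static void main(String[] args) throws Exception {
            JsonNode root = new ObjectMapper().readTree(new File("theme.json"));
            for (JsonNode w : root.path("configuration").path("widgets")) {
                String config = w.path("config").asText();
                String type = w.path("widget").path("type").asText();
                String unit = w.path("widget").path("units").path(0).path("unit-name").asText("");
                // e.g. hdfs-site/dfs.namenode.handler.count -> slider (int)
                System.out.println(config + " -> " + type + (unit.isEmpty() ? "" : " (" + unit + ")"));
            }
        }
    }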


[31/41] ambari git commit: AMBARI-19125. Active tab is not switched (onechiporenko)

Posted by jo...@apache.org.
AMBARI-19125. Active tab is not switched (onechiporenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/234838f4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/234838f4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/234838f4

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 234838f4b6651dbebf09d57910aea925c8247f1c
Parents: 69cdaa7
Author: Oleg Nechiporenko <on...@apache.org>
Authored: Wed Dec 7 15:01:08 2016 +0200
Committer: Oleg Nechiporenko <on...@apache.org>
Committed: Wed Dec 7 15:01:08 2016 +0200

----------------------------------------------------------------------
 ambari-web/app/templates/common/host_progress_popup.hbs | 8 +++++---
 ambari-web/app/views/main/host/menu.js                  | 4 ++--
 2 files changed, 7 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/234838f4/ambari-web/app/templates/common/host_progress_popup.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/common/host_progress_popup.hbs b/ambari-web/app/templates/common/host_progress_popup.hbs
index f0a8c17..ae411b1 100644
--- a/ambari-web/app/templates/common/host_progress_popup.hbs
+++ b/ambari-web/app/templates/common/host_progress_popup.hbs
@@ -266,9 +266,11 @@
           <div class="col-sm-12">
             {{#if App.supports.logSearch}}
               {{#if view.isLogSearchInstalled}}
-                <button type="button" class="btn btn-link pull-right" {{action navigateToHostLogs target="view"}} {{bindAttr class="view.isLogsLinkVisible::hidden"}}>
-                  <i class="glyphicon glyphicon-file"></i>&nbsp;{{t common.host}} {{t common.logs}}
-                </button>
+                {{#isAuthorized "SERVICE.VIEW_OPERATIONAL_LOGS"}}
+                  <button type="button" class="btn btn-link pull-right" {{action navigateToHostLogs target="view"}} {{bindAttr class="view.isLogsLinkVisible::hidden"}}>
+                    <i class="glyphicon glyphicon-file"></i>&nbsp;{{t common.host}} {{t common.logs}}
+                  </button>
+                {{/isAuthorized}}
               {{/if}}
             {{/if}}
             <button type="button" class="btn btn-link pull-right" {{translateAttr title="common.openNewWindow"}} {{action openTaskLogInDialog}}>

http://git-wip-us.apache.org/repos/asf/ambari/blob/234838f4/ambari-web/app/views/main/host/menu.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/host/menu.js b/ambari-web/app/views/main/host/menu.js
index b3c52f4..4ebc140 100644
--- a/ambari-web/app/views/main/host/menu.js
+++ b/ambari-web/app/views/main/host/menu.js
@@ -95,9 +95,9 @@ App.MainHostMenuView = Em.CollectionView.extend({
   activateView: function () {
     var defaultRoute = App.router.get('currentState.name') || "summary";
     $.each(this._childViews, function () {
-      this.set('active', (this.get('content.routing') == defaultRoute ? "active" : ""));
+      this.set('active', this.get('content.routing') === defaultRoute ? 'active' : '');
     });
-  },
+  }.observes('App.router.currentState.name'),
 
   deactivateChildViews: function () {
     this.get('_childViews').setEach('active', '');


[23/41] ambari git commit: AMBARI-19094. HDP 3.0 support for YARN/MR with configs, kerberos, widgets, metrics, quicklinks, and themes (alejandro)

Posted by jo...@apache.org.
AMBARI-19094. HDP 3.0 support for YARN/MR with configs, kerberos, widgets, metrics, quicklinks, and themes (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a62119ab
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a62119ab
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a62119ab

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: a62119ab9f1abc355bc99ee0fc71afac6723d827
Parents: 2a710b3
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Tue Dec 6 16:04:56 2016 -0800
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Tue Dec 6 16:54:11 2016 -0800

----------------------------------------------------------------------
 .../HDP/3.0/services/YARN/YARN_widgets.json     | 670 +++++++++++++++
 .../YARN/configuration-mapred/mapred-env.xml    |  51 ++
 .../YARN/configuration-mapred/mapred-site.xml   | 134 +++
 .../YARN/configuration/capacity-scheduler.xml   |  71 ++
 .../YARN/configuration/ranger-yarn-audit.xml    | 177 ++++
 .../ranger-yarn-plugin-properties.xml           |  82 ++
 .../configuration/ranger-yarn-policymgr-ssl.xml |  66 ++
 .../YARN/configuration/ranger-yarn-security.xml |  58 ++
 .../services/YARN/configuration/yarn-env.xml    | 200 +++++
 .../services/YARN/configuration/yarn-log4j.xml  | 103 +++
 .../services/YARN/configuration/yarn-site.xml   | 814 +++++++++++++++++++
 .../stacks/HDP/3.0/services/YARN/kerberos.json  | 278 +++++++
 .../stacks/HDP/3.0/services/YARN/metainfo.xml   | 173 ++++
 .../YARN/quicklinks-mapred/quicklinks.json      |  80 ++
 .../services/YARN/quicklinks/quicklinks.json    |  80 ++
 .../3.0/services/YARN/themes-mapred/theme.json  | 132 +++
 .../HDP/3.0/services/YARN/themes/theme.json     | 250 ++++++
 17 files changed, 3419 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json
new file mode 100644
index 0000000..782f21d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/YARN_widgets.json
@@ -0,0 +1,670 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_yarn_dashboard",
+      "display_name": "Standard YARN Dashboard",
+      "section_name": "YARN_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Memory Utilization",
+          "description": "Percentage of total memory allocated to containers running in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AllocatedMB",
+              "metric_path": "metrics/yarn/Queue/root/AllocatedMB",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AvailableMB",
+              "metric_path": "metrics/yarn/Queue/root/AvailableMB",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Memory Utilization",
+              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedMB / (yarn.QueueMetrics.Queue=root.AllocatedMB + yarn.QueueMetrics.Queue=root.AvailableMB)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "CPU Utilization",
+          "description": "Percentage of total virtual cores allocated to containers running in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AllocatedVCores",
+              "metric_path": "metrics/yarn/Queue/root/AllocatedVCores",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AvailableVCores",
+              "metric_path": "metrics/yarn/Queue/root/AvailableVCores",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total Allocatable CPU Utilized across NodeManager",
+              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedVCores / (yarn.QueueMetrics.Queue=root.AllocatedVCores + yarn.QueueMetrics.Queue=root.AvailableVCores)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Bad Local Disks",
+          "description": "Number of unhealthy local disks across all NodeManagers.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.BadLocalDirs",
+              "metric_path": "metrics/yarn/BadLocalDirs",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.BadLogDirs",
+              "metric_path": "metrics/yarn/BadLogDirs",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Number of unhealthy local disks for NodeManager",
+              "value": "${yarn.NodeManagerMetrics.BadLocalDirs + yarn.NodeManagerMetrics.BadLogDirs}"
+            }
+          ],
+          "properties": {
+            "display_unit": ""
+          }
+        },
+        {
+          "widget_name": "Container Failures",
+          "description": "Percentage of all containers failing in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
+              "metric_path": "metrics/yarn/ContainersFailed._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
+              "metric_path": "metrics/yarn/ContainersCompleted._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
+              "metric_path": "metrics/yarn/ContainersLaunched._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersIniting._sum",
+              "metric_path": "metrics/yarn/ContainersIniting._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
+              "metric_path": "metrics/yarn/ContainersKilled._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersRunning._sum",
+              "metric_path": "metrics/yarn/ContainersRunning._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Container Failures",
+              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting._sum + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning._sum)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "App Failures",
+          "description": "Percentage of all launched applications failing in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsFailed._rate",
+              "metric_path": "metrics/yarn/Queue/root/AppsFailed._rate",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsKilled._rate",
+              "metric_path": "metrics/yarn/Queue/root/AppsKilled._rate",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
+              "metric_path": "metrics/yarn/Queue/root/AppsPending",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsRunning",
+              "metric_path": "metrics/yarn/Queue/root/AppsRunning",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsSubmitted._rate",
+              "metric_path": "metrics/yarn/Queue/root/AppsSubmitted._rate",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsCompleted._rate",
+              "metric_path": "metrics/yarn/Queue/root/AppsCompleted._rate",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "App Failures",
+              "value": "${(yarn.QueueMetrics.Queue=root.AppsFailed._rate/(yarn.QueueMetrics.Queue=root.AppsFailed._rate + yarn.QueueMetrics.Queue=root.AppsKilled._rate + yarn.QueueMetrics.Queue=root.AppsPending + yarn.QueueMetrics.Queue=root.AppsRunning + yarn.QueueMetrics.Queue=root.AppsSubmitted._rate + yarn.QueueMetrics.Queue=root.AppsCompleted._rate)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Pending Apps",
+          "description": "Count of applications waiting for cluster resources to become available.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
+              "metric_path": "metrics/yarn/Queue/root/AppsPending",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Pending Apps",
+              "value": "${yarn.QueueMetrics.Queue=root.AppsPending}"
+            }
+          ],
+          "properties": {
+            "display_unit": "Apps",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Memory",
+          "description": "Percentage of memory used across all NodeManager hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "mem_total._sum",
+              "metric_path": "metrics/memory/mem_total._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "mem_free._sum",
+              "metric_path": "metrics/memory/mem_free._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Memory utilization",
+              "value": "${((mem_total._sum - mem_free._sum)/mem_total._sum) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Disk",
+          "description": "Sum of disk throughput for all NodeManager hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "read_bps._sum",
+              "metric_path": "metrics/disk/read_bps._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "write_bps._sum",
+              "metric_path": "metrics/disk/write_bps._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Read throughput",
+              "value": "${read_bps._sum/1048576}"
+            },
+            {
+              "name": "Write throughput",
+              "value": "${write_bps._sum/1048576}"
+            }
+          ],
+          "properties": {
+            "display_unit": "Mbps",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Network",
+          "description": "Average of Network utilized across all NodeManager hosts.",
+          "default_section_name": "YARN_SUMMARY",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "pkts_in._avg",
+              "metric_path": "metrics/network/pkts_in._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "pkts_out._avg",
+              "metric_path": "metrics/network/pkts_out._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Packets In",
+              "value": "${pkts_in._avg}"
+            },
+            {
+              "name": "Packets Out",
+              "value": "${pkts_out._avg}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster CPU",
+          "description": "Percentage of CPU utilized across all NodeManager hosts.",
+          "default_section_name": "YARN_SUMMARY",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "cpu_system._sum",
+              "metric_path": "metrics/cpu/cpu_system._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_user._sum",
+              "metric_path": "metrics/cpu/cpu_user._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_nice._sum",
+              "metric_path": "metrics/cpu/cpu_nice._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_idle._sum",
+              "metric_path": "metrics/cpu/cpu_idle._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_wio._sum",
+              "metric_path": "metrics/cpu/cpu_wio._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "CPU utilization",
+              "value": "${((cpu_system._sum + cpu_user._sum + cpu_nice._sum)/(cpu_system._sum + cpu_user._sum + cpu_nice._sum + cpu_idle._sum + cpu_wio._sum)) * 100}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "%"
+          }
+        }
+      ]
+    },
+    {
+      "layout_name": "default_yarn_heatmap",
+      "display_name": "YARN Heatmaps",
+      "section_name": "YARN_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "YARN local disk space utilization per NodeManager",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
+              "metric_path": "metrics/yarn/GoodLocalDirsDiskUtilizationPerc",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
+              "metric_path": "metrics/yarn/GoodLogDirsDiskUtilizationPerc",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "YARN local disk space utilization per NodeManager",
+              "value": "${(yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc + yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc)/2}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Total Allocatable RAM Utilized per NodeManager",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedGB",
+              "metric_path": "metrics/yarn/AllocatedGB",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.AvailableGB",
+              "metric_path": "metrics/yarn/AvailableGB",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total Allocatable RAM Utilized per NodeManager",
+              "value": "${(yarn.NodeManagerMetrics.AllocatedGB/(yarn.NodeManagerMetrics.AvailableGB + yarn.NodeManagerMetrics.AllocatedGB)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Total Allocatable CPU Utilized per NodeManager",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
+              "metric_path": "metrics/yarn/AllocatedVCores",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.AvailableVCores",
+              "metric_path": "metrics/yarn/AvailableVCores",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total Allocatable CPU Utilized per NodeManager",
+              "value": "${(yarn.NodeManagerMetrics.AllocatedVCores/(yarn.NodeManagerMetrics.AllocatedVCores + yarn.NodeManagerMetrics.AvailableVCores)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Container Failures",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
+              "metric_path": "metrics/yarn/ContainersFailed._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
+              "metric_path": "metrics/yarn/ContainersCompleted._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
+              "metric_path": "metrics/yarn/ContainersLaunched._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersIniting",
+              "metric_path": "metrics/yarn/ContainersIniting",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
+              "metric_path": "metrics/yarn/ContainersKilled._rate",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersRunning",
+              "metric_path": "metrics/yarn/ContainersRunning",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Container Failures",
+              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "NodeManager GC Time",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
+              "metric_path": "metrics/jvm/gcTimeMillis",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager Garbage Collection Time",
+              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "max_limit": "10000"
+          }
+        },
+        {
+          "widget_name": "NodeManager JVM Heap Memory Used",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager JVM Heap Memory Used",
+              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "Allocated Containers",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedContainers",
+              "metric_path": "metrics/yarn/AllocatedContainers",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Allocated Containers",
+              "value": "${yarn.NodeManagerMetrics.AllocatedContainers}"
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "NodeManager RAM Utilized",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedGB",
+              "metric_path": "metrics/yarn/AllocatedGB",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager RAM Utilized",
+              "value": "${yarn.NodeManagerMetrics.AllocatedGB}"
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "NodeManager CPU Utilized",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
+              "metric_path": "metrics/yarn/AllocatedVCores",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager CPU Utilized",
+              "value": "${yarn.NodeManagerMetrics.AllocatedVCores}"
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}
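
Note: each widget's value is an arithmetic expression over the metrics it declares, e.g. Memory Utilization is ${(AllocatedMB / (AllocatedMB + AvailableMB)) * 100}. A worked sketch of that expression with made-up sample values:

    public class WidgetExpressionDemo {
        public static void main(String[] args) {
            // Hypothetical samples for the AMS metrics named in the widget definition.
            double allocatedMB = 6144;   // yarn.QueueMetrics.Queue=root.AllocatedMB
            double availableMB = 2048;   // yarn.QueueMetrics.Queue=root.AvailableMB

            // ${(AllocatedMB / (AllocatedMB + AvailableMB)) * 100}
            double memoryUtilization = (allocatedMB / (allocatedMB + availableMB)) * 100;
            System.out.printf("Memory Utilization = %.1f%%%n", memoryUtilization);  // 75.0%
        }
    }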

http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
new file mode 100644
index 0000000..a70fad3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-env.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <!-- These configs were inherited from HDP 2.2 -->
+  <!-- mapred-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>mapred-env template</display-name>
+    <description>This is the Jinja template for the mapred-env.sh file</description>
+    <value>
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+
+export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}
+
+export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
+
+#export HADOOP_JOB_HISTORYSERVER_OPTS=
+#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
+#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
+#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
+#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
+#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.
+export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
+export HADOOP_OPTS="-Djava.io.tmpdir={{hadoop_java_io_tmpdir}} $HADOOP_OPTS"
+export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
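
Note: the value above is rendered on the agent as a Jinja template, so {{jobhistory_heapsize}} and friends are substituted before mapred-env.sh is written. A rough Java stand-in for that substitution (simple string replacement only, and the heap size value is hypothetical; this only mimics the {{var}} expansion, not Ambari's actual Jinja rendering):

    import java.util.Map;

    public class TemplateRenderDemo {
        public static void main(String[] args) {
            String template = "export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}";
            Map<String, String> params = Map.of("jobhistory_heapsize", "900");  // hypothetical value
            String rendered = template;
            for (Map.Entry<String, String> e : params.entrySet()) {
                rendered = rendered.replace("{{" + e.getKey() + "}}", e.getValue());
            }
            System.out.println(rendered);  // export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=900
        }
    }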

http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
new file mode 100644
index 0000000..cef2b14
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration-mapred/mapred-site.xml
@@ -0,0 +1,134 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- Put site-specific property overrides in this file. -->
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>mapreduce.admin.user.env</name>
+    <value>LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64</value>
+    <description>
+      Additional execution environment entries for map and reduce task processes.
+      This is not an additive property. You must preserve the original value if
+      you want your map and reduce tasks to have access to native libraries (compression, etc)
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.application.classpath</name>
+    <value>$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure</value>
+    <description>
+      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
+      entries.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.application.framework.path</name>
+    <value>/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.app.mapreduce.am.admin-command-opts</name>
+    <value>-Dhdp.version=${hdp.version}</value>
+    <description>
+      Java opts for the MR App Master processes.
+      The following symbol, if present, will be interpolated: @taskid@ is replaced
+      by current TaskID. Any other occurrences of '@' will go unchanged.
+      For example, to enable verbose gc logging to a file named for the taskid in
+      /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
+      -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
+
+      Usage of -Djava.library.path can cause programs to no longer function if
+      hadoop native libraries are used. These values should instead be set as part
+      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
+      mapreduce.reduce.env config settings.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.admin.map.child.java.opts</name>
+    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.admin.reduce.child.java.opts</name>
+    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.shuffle.fetch.retry.enabled</name>
+    <value>1</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.shuffle.fetch.retry.interval-ms</name>
+    <value>1000</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.reduce.shuffle.fetch.retry.timeout-ms</name>
+    <value>30000</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.job.emit-timeline-data</name>
+    <value>false</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.bind-host</name>
+    <value>0.0.0.0</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.3 -->
+  <property>
+    <name>mapreduce.jobhistory.recovery.enable</name>
+    <value>true</value>
+    <description>Enable the history server to store server state and recover
+      server state upon startup.  If enabled then
+      mapreduce.jobhistory.recovery.store.class must be specified.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.recovery.store.class</name>
+    <value>org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService</value>
+    <description>The HistoryServerStateStoreService class to store history server
+      state for recovery.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.recovery.store.leveldb.path</name>
+    <value>/hadoop/mapreduce/jhs</value>
+    <description>The URI where history server state will be stored if HistoryServerLeveldbStateStoreService
+      is configured as the recovery storage class.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
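
Note: the three shuffle-fetch retry properties work together: with retry enabled (value 1), a failed fetch is retried every 1000 ms until the 30000 ms timeout elapses, i.e. on the order of 30 attempts. A minimal sketch of that loop shape; doFetch is a hypothetical placeholder for the map-output fetch, not Hadoop's implementation:

    public class ShuffleRetryDemo {
        // Values from mapred-site.xml above.
        static final long RETRY_INTERVAL_MS = 1000;   // mapreduce.reduce.shuffle.fetch.retry.interval-ms
        static final long RETRY_TIMEOUT_MS  = 30000;  // mapreduce.reduce.shuffle.fetch.retry.timeout-ms

        static boolean fetchWithRetry() throws InterruptedException {
            long deadline = System.currentTimeMillis() + RETRY_TIMEOUT_MS;
            while (true) {
                if (doFetch()) {
                    return true;                       // fetch succeeded
                }
                if (System.currentTimeMillis() + RETRY_INTERVAL_MS > deadline) {
                    return false;                      // give up: the timeout would be exceeded
                }
                Thread.sleep(RETRY_INTERVAL_MS);       // wait out the retry interval
            }
        }

        static boolean doFetch() { return false; }     // hypothetical placeholder; always fails here

        public static void main(String[] args) throws InterruptedException {
            System.out.println("fetched = " + fetchWithRetry());  // false after ~30 attempts
        }
    }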

http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/capacity-scheduler.xml
new file mode 100644
index 0000000..4768e46
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/capacity-scheduler.xml
@@ -0,0 +1,71 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>yarn.scheduler.capacity.resource-calculator</name>
+    <description>
+      The ResourceCalculator implementation to be used to compare Resources in the scheduler.
+      The default, i.e. org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator, only uses
+      Memory while DominantResourceCalculator uses Dominant-resource to compare multi-dimensional
+      resources such as Memory, CPU etc. A Java ResourceCalculator class name is expected.
+    </description>
+    <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+    <display-name>CPU Scheduling</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>org.apache.hadoop.yarn.util.resource.DominantResourceCalculator</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.accessible-node-labels</name>
+    <value>*</value>
+    <description/>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- In HDP 2.3, yarn.scheduler.capacity.root.default-node-label-expression was deleted -->
+
+  <!-- These configs were inherited from HDP 2.5 -->
+  <property>
+    <name>capacity-scheduler</name>
+    <description>Enter key=value (one per line) for all properties of capacity-scheduler.xml</description>
+    <depends-on>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>enable_hive_interactive</name>
+      </property>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>llap_queue_capacity</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
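
Note: the resource-calculator property decides how the scheduler sizes requests: DefaultResourceCalculator compares memory only, while DominantResourceCalculator compares each request's dominant share across memory and vcores. A worked sketch of the dominant-share idea with made-up cluster numbers, not the Hadoop implementation itself:

    public class DominantShareDemo {
        public static void main(String[] args) {
            double clusterMemMB = 102400, clusterVCores = 50;   // hypothetical cluster totals

            // Request A is memory-heavy; request B is cpu-heavy.
            double aMem = 8192, aCores = 1;
            double bMem = 2048, bCores = 4;

            double aDominant = Math.max(aMem / clusterMemMB, aCores / clusterVCores); // max(0.08, 0.02) = 0.08
            double bDominant = Math.max(bMem / clusterMemMB, bCores / clusterVCores); // max(0.02, 0.08) = 0.08
            System.out.printf("A dominant share = %.2f, B dominant share = %.2f%n", aDominant, bDominant);

            // DefaultResourceCalculator would rank A "bigger" (memory only);
            // DominantResourceCalculator sees the two requests as equal-sized.
        }
    }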

http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-audit.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-audit.xml
new file mode 100644
index 0000000..a6b1baa
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-audit.xml
@@ -0,0 +1,177 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.audit.is.enabled</name>
+    <value>true</value>
+    <description>Is Audit enabled?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db</name>
+    <value>false</value>
+    <display-name>Audit to DB</display-name>
+    <description>Is Audit to DB enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.db</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.jdbc.url</name>
+    <value>{{audit_jdbc_url}}</value>
+    <description>Audit DB JDBC URL</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.user</name>
+    <value>{{xa_audit_db_user}}</value>
+    <description>Audit DB JDBC User</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.password</name>
+    <value>crypted</value>
+    <property-type>PASSWORD</property-type>
+    <description>Audit DB JDBC Password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.jdbc.driver</name>
+    <value>{{jdbc_driver}}</value>
+    <description>Audit DB JDBC Driver</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.credential.provider.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>Credential file store</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.batch.filespool.dir</name>
+    <value>/var/log/hadoop/yarn/audit/db/spool</value>
+    <description>/var/log/hadoop/yarn/audit/db/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs</name>
+    <value>true</value>
+    <display-name>Audit to HDFS</display-name>
+    <description>Is Audit to HDFS enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.dir</name>
+    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>HDFS folder to write audit to; make sure the service user has the required permissions</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
+    <value>/var/log/hadoop/yarn/audit/hdfs/spool</value>
+    <description>/var/log/hadoop/yarn/audit/hdfs/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr</name>
+    <value>false</value>
+    <display-name>Audit to SOLR</display-name>
+    <description>Is Solr audit enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.solr</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.urls</name>
+    <value/>
+    <description>Solr URL</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.urls</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.zookeepers</name>
+    <value>NONE</value>
+    <description>Solr Zookeeper string</description>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.zookeepers</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
+    <value>/var/log/hadoop/yarn/audit/solr/spool</value>
+    <description>/var/log/hadoop/yarn/audit/solr/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.provider.summary.enabled</name>
+    <value>false</value>
+    <display-name>Audit provider summary enabled</display-name>
+    <description>Enable Summary audit?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
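
The curly-brace tokens in the property values above ({{audit_jdbc_url}}, {{xa_audit_db_user}}, {{credential_file}}, ...) are placeholders that get substituted with live cluster values when the configuration is rendered on the agent side. A minimal Python sketch of that substitution, assuming a plain dictionary of runtime values (the names and values below are hypothetical; the real ones come from Ambari's cluster state):

  import re

  # Hypothetical runtime values; the real ones come from the cluster state.
  params = {
      "audit_jdbc_url": "jdbc:mysql://db-host:3306/ranger_audit",
      "xa_audit_db_user": "rangerlogger",
      "credential_file": "/etc/ranger/yarn/cred.jceks",
  }

  def resolve(value, params):
      # Replace each {{name}} placeholder with its runtime value, if known;
      # unknown placeholders are left untouched.
      return re.sub(r"\{\{(\w+)\}\}",
                    lambda m: str(params.get(m.group(1), m.group(0))),
                    value)

  print(resolve("jceks://file{{credential_file}}", params))
  # -> jceks://file/etc/ranger/yarn/cred.jceks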

http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml
new file mode 100644
index 0000000..97867cc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-plugin-properties.xml
@@ -0,0 +1,82 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>policy_user</name>
+    <value>ambari-qa</value>
+    <display-name>Policy user for YARN</display-name>
+    <description>This user must be a system user and must also be present in the Ranger admin portal</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.rpc.protection</name>
+    <value/>
+    <description>Used for repository creation on ranger admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>common.name.for.certificate</name>
+    <value/>
+    <description>Common name for the certificate; this value should match what is specified in the repo within Ranger admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-yarn-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Enable Ranger for YARN</display-name>
+    <description>Enable Ranger YARN plugin?</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>ranger-yarn-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>yarn</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on ranger admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_PASSWORD</name>
+    <value>yarn</value>
+    <display-name>Ranger repository config password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Used for repository creation on ranger admin</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
new file mode 100644
index 0000000..5410104
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-keystore.jks</value>
+    <description>Java Keystore files</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <description>password for keystore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-truststore.jks</value>
+    <description>java truststore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <description>java truststore password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java keystore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java truststore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-security.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-security.xml
new file mode 100644
index 0000000..5f69962
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/ranger-yarn-security.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>ranger.plugin.yarn.service.name</name>
+    <value>{{repo_name}}</value>
+    <description>Name of the Ranger service containing policies for this Yarn instance</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.yarn.policy.source.impl</name>
+    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
+    <description>Class to retrieve policies from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.yarn.policy.rest.url</name>
+    <value>{{policymgr_mgr_url}}</value>
+    <description>URL to Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.yarn.policy.rest.ssl.config.file</name>
+    <value>/etc/hadoop/conf/ranger-policymgr-ssl-yarn.xml</value>
+    <description>Path to the file containing SSL details to contact Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.yarn.policy.pollIntervalMs</name>
+    <value>30000</value>
+    <description>How often (in milliseconds) to poll for changes in policies</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.yarn.policy.cache.dir</name>
+    <value>/etc/ranger/{{repo_name}}/policycache</value>
+    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
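
The last three properties describe the plugin's policy refresh cycle: poll the Ranger Admin REST endpoint at ranger.plugin.yarn.policy.rest.url every pollIntervalMs milliseconds and cache the result under the policy cache dir so enforcement keeps working if Ranger Admin becomes unreachable. A rough conceptual sketch of that loop in Python -- not the actual RangerAdminRESTClient logic, and with fetch_policies as a hypothetical stand-in for the REST call:

  import json
  import os
  import time

  POLL_INTERVAL_MS = 30000  # ranger.plugin.yarn.policy.pollIntervalMs
  CACHE_DIR = "/etc/ranger/yarn_repo/policycache"  # policy.cache.dir with repo_name filled in

  def fetch_policies(rest_url):
      # Hypothetical stand-in for the REST call the plugin makes to Ranger Admin.
      raise NotImplementedError

  def poll_loop(rest_url):
      while True:
          try:
              policies = fetch_policies(rest_url)
              with open(os.path.join(CACHE_DIR, "policies.json"), "w") as f:
                  json.dump(policies, f)  # cached copy survives a Ranger Admin outage
          except Exception:
              pass  # keep enforcing the last successfully cached policies
          time.sleep(POLL_INTERVAL_MS / 1000.0)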

http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-env.xml
new file mode 100644
index 0000000..bbc2930
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-env.xml
@@ -0,0 +1,200 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true" supports_adding_forbidden="true">
+  <!-- These properties were inherited from HDP 2.1 -->
+  <property>
+    <name>apptimelineserver_heapsize</name>
+    <value>1024</value>
+    <display-name>AppTimelineServer Java heap size</display-name>
+    <description>Max heap size for the AppTimelineServer, given as a numerical value in MB</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <unit>MB</unit>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These properties were inherited from HDP 2.2 -->
+  <property>
+    <name>yarn_cgroups_enabled</name>
+    <value>false</value>
+    <description>You can use CGroups to isolate CPU-heavy processes in a Hadoop cluster.</description>
+    <display-name>CPU Isolation</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These properties were inherited from HDP 2.3 -->
+  <property>
+    <name>is_supported_yarn_ranger</name>
+    <value>true</value>
+    <description>Set to false by default; needs to be set to true in stacks that use the Ranger YARN plugin</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- yarn-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>yarn-env template</display-name>
+    <description>This is the jinja template for yarn-env.sh file</description>
+    <value>
+      export HADOOP_YARN_HOME={{hadoop_yarn_home}}
+      export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
+      export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
+      export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+      export JAVA_HOME={{java64_home}}
+      export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
+
+      # We need to add the EWMA appender for the yarn daemons only;
+      # however, YARN_ROOT_LOGGER is shared by the yarn client and the
+      # daemons. This restricts the EWMA appender to the daemons only.
+      INVOKER="${0##*/}"
+      if [ "$INVOKER" == "yarn-daemon.sh" ]; then
+        export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,EWMA,RFA}
+      fi
+
+      # User for YARN daemons
+      export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+
+      # resolve links - $0 may be a softlink
+      export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
+
+      # some Java parameters
+      # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+      if [ "$JAVA_HOME" != "" ]; then
+      #echo "run java in $JAVA_HOME"
+      JAVA_HOME=$JAVA_HOME
+      fi
+
+      if [ "$JAVA_HOME" = "" ]; then
+      echo "Error: JAVA_HOME is not set."
+      exit 1
+      fi
+
+      JAVA=$JAVA_HOME/bin/java
+      JAVA_HEAP_MAX=-Xmx1000m
+
+      # For setting YARN specific HEAP sizes please use this
+      # Parameter and set appropriately
+      YARN_HEAPSIZE={{yarn_heapsize}}
+
+      # check envvars which might override default args
+      if [ "$YARN_HEAPSIZE" != "" ]; then
+      JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+      fi
+
+      # Resource Manager specific parameters
+
+      # Specify the max Heapsize for the ResourceManager using a numerical value
+      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+      # the value to 1000.
+      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
+      # and/or YARN_RESOURCEMANAGER_OPTS.
+      # If not specified, the default value will be picked from either YARN_HEAPMAX
+      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+      export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
+
+      # Specify the JVM options to be used when starting the ResourceManager.
+      # These options will be appended to the options specified as YARN_OPTS
+      # and therefore may override any similar flags set in YARN_OPTS
+      #export YARN_RESOURCEMANAGER_OPTS=
+
+      # Node Manager specific parameters
+
+      # Specify the max Heapsize for the NodeManager using a numerical value
+      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+      # the value to 1000.
+      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
+      # and/or YARN_NODEMANAGER_OPTS.
+      # If not specified, the default value will be picked from either YARN_HEAPMAX
+      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+      export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
+
+      # Specify the max Heapsize for the timeline server using a numerical value
+      # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+      # the value to 1024.
+      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
+      # and/or YARN_TIMELINESERVER_OPTS.
+      # If not specified, the default value will be picked from either YARN_HEAPMAX
+      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+      export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}
+
+      # Specify the JVM options to be used when starting the NodeManager.
+      # These options will be appended to the options specified as YARN_OPTS
+      # and therefore may override any similar flags set in YARN_OPTS
+      #export YARN_NODEMANAGER_OPTS=
+
+      # so that filenames w/ spaces are handled correctly in loops below
+      IFS=
+
+
+      # default log directory and file
+      if [ "$YARN_LOG_DIR" = "" ]; then
+      YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+      fi
+      if [ "$YARN_LOGFILE" = "" ]; then
+      YARN_LOGFILE='yarn.log'
+      fi
+
+      # default policy file for service-level authorization
+      if [ "$YARN_POLICYFILE" = "" ]; then
+      YARN_POLICYFILE="hadoop-policy.xml"
+      fi
+
+      # restore ordinary behaviour
+      unset IFS
+
+
+      YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+      YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+      YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+      YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+      YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+      YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+      YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+      YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+      export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT"
+      export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT"
+      if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+      YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+      fi
+      YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
+      YARN_OPTS="$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}"
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
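
The content property above is declared as a Jinja template; at deploy time the agent renders it with cluster-specific values for tokens like {{hadoop_yarn_home}} and {{yarn_heapsize}}. A minimal sketch of equivalent rendering, assuming the jinja2 package is available and using made-up values (the real parameter set comes from the stack's params.py):

  from jinja2 import Template

  # A two-line excerpt of the yarn-env.sh template; the full template renders
  # the same way, just with more variables.
  template = Template(
      "export HADOOP_YARN_HOME={{hadoop_yarn_home}}\n"
      "YARN_HEAPSIZE={{yarn_heapsize}}\n")

  print(template.render(hadoop_yarn_home="/usr/hdp/current/hadoop-yarn-client",
                        yarn_heapsize=1024))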

http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-log4j.xml
new file mode 100644
index 0000000..9ac34f3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-log4j.xml
@@ -0,0 +1,103 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>content</name>
+    <display-name>yarn-log4j template</display-name>
+    <description>Custom log4j.properties</description>
+    <value>
+#Relative to Yarn Log Dir Prefix
+yarn.log.dir=.
+#
+# Job Summary Appender
+#
+# Use following logger to send summary to separate file defined by
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+#
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+# Set the ResourceManager summary log filename
+yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
+# Set the ResourceManager summary log level and appender
+yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
+#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+
+# To enable AppSummaryLogging for the RM,
+# set yarn.server.resourcemanager.appsummary.logger to
+# LEVEL,RMSUMMARY in hadoop-env.sh
+
+# Appender for ResourceManager Application Summary Log
+# Requires the following properties to be set
+#    - hadoop.log.dir (Hadoop Log directory)
+#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
+log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+log4j.appender.RMSUMMARY.MaxFileSize=256MB
+log4j.appender.RMSUMMARY.MaxBackupIndex=20
+log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.JSA.DatePattern=.yyyy-MM-dd
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+
+# Appender for viewing information for errors and warnings
+yarn.ewma.cleanupInterval=300
+yarn.ewma.messageAgeLimitSeconds=86400
+yarn.ewma.maxUniqueMessages=250
+log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
+log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
+log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
+log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
+
+# Audit logging for ResourceManager
+rm.audit.logger=${hadoop.root.logger}
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
+log4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log
+log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd
+
+# Audit logging for NodeManager
+nm.audit.logger=${hadoop.root.logger}
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false
+log4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log
+log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>


[28/41] ambari git commit: AMBARI-19108 Perf: Web UI can't load json with host check info. (atkach)

Posted by jo...@apache.org.
AMBARI-19108 Perf: Web UI can't load json with host check info. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/165c4d69
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/165c4d69
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/165c4d69

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 165c4d692b4a76e2911027ac3f06d1948fb696e7
Parents: 304a5bf
Author: Andrii Tkach <at...@apache.org>
Authored: Tue Dec 6 20:17:35 2016 +0200
Committer: Andrii Tkach <at...@apache.org>
Committed: Wed Dec 7 12:47:42 2016 +0200

----------------------------------------------------------------------
 ambari-web/app/mixins/main/host/details/actions/check_host.js | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/165c4d69/ambari-web/app/mixins/main/host/details/actions/check_host.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/main/host/details/actions/check_host.js b/ambari-web/app/mixins/main/host/details/actions/check_host.js
index 532353f..faf8ef1 100644
--- a/ambari-web/app/mixins/main/host/details/actions/check_host.js
+++ b/ambari-web/app/mixins/main/host/details/actions/check_host.js
@@ -193,7 +193,8 @@ App.CheckHostMixin = Em.Mixin.create({
    */
   getHostCheckTasksSuccess: function (data) {
     if (!data) {
-      return;
+      // if the resolution host check returned corrupted data, skip it and fall back to the general host check
+      return this.getGeneralHostCheck();
     }
     if (["FAILED", "COMPLETED", "TIMEDOUT"].contains(data.Requests.request_status)) {
       if (data.Requests.inputs.indexOf("last_agent_env_check") != -1) {


[39/41] ambari git commit: AMBARI-19121. Add ability for users to provide mount exclusion list before cluster deployment (Dmytro Grinenko via smohanty)

Posted by jo...@apache.org.
AMBARI-19121. Add ability for users to provide mount exclusion list before cluster deployment (Dmytro Grinenko via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/51ec956b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/51ec956b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/51ec956b

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 51ec956b14378d8d0fdab03fa8d2e60559772d1f
Parents: d56576d
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Wed Dec 7 10:35:05 2016 -0800
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Wed Dec 7 10:38:52 2016 -0800

----------------------------------------------------------------------
 ambari-agent/conf/unix/ambari-agent.ini         |  1 +
 .../src/main/python/ambari_agent/Hardware.py    | 52 ++++++++++++++-
 .../TestCustomServiceOrchestrator.py            |  4 +-
 .../test/python/ambari_agent/TestHardware.py    | 70 ++++++++++++++++++++
 4 files changed, 122 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/51ec956b/ambari-agent/conf/unix/ambari-agent.ini
----------------------------------------------------------------------
diff --git a/ambari-agent/conf/unix/ambari-agent.ini b/ambari-agent/conf/unix/ambari-agent.ini
index d6fcf5f..56fa605 100644
--- a/ambari-agent/conf/unix/ambari-agent.ini
+++ b/ambari-agent/conf/unix/ambari-agent.ini
@@ -40,6 +40,7 @@ alert_kinit_timeout=14400000
 system_resource_overrides=/etc/resource_overrides
 ; memory_threshold_soft_mb=400
 ; memory_threshold_hard_mb=1000
+; ignore_mount_points=/mnt/custom1,/mnt/custom2
 
 [security]
 keysdir=/var/lib/ambari-agent/keys

http://git-wip-us.apache.org/repos/asf/ambari/blob/51ec956b/ambari-agent/src/main/python/ambari_agent/Hardware.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/Hardware.py b/ambari-agent/src/main/python/ambari_agent/Hardware.py
index 3c94d28..0d431a3 100644
--- a/ambari-agent/src/main/python/ambari_agent/Hardware.py
+++ b/ambari-agent/src/main/python/ambari_agent/Hardware.py
@@ -41,6 +41,7 @@ class Hardware:
   CHECK_REMOTE_MOUNTS_TIMEOUT_DEFAULT = '10'
   IGNORE_ROOT_MOUNTS = ["proc", "dev", "sys"]
   IGNORE_DEVICES = ["proc", "tmpfs", "cgroup", "mqueue", "shm"]
+  LINUX_PATH_SEP = "/"
 
   def __init__(self, config):
     self.hardware = {
@@ -88,6 +89,37 @@ class Hardware:
     return True
 
   @classmethod
+  def _is_mount_blacklisted(cls, blacklist, mount_point):
+    """
+    Verify if a particular mount point is in the blacklist.
+
+    :return True if mount_point, or a path prefix of mount_point, is in the blacklist; otherwise False
+
+     Example:
+       Mounts: /, /mnt/my_mount, /mnt/my_mount/sub_mount
+       Blacklist: /mnt/my_mount
+       Surviving mounts: /
+
+    :type blacklist list
+    :type mount_point str
+    :rtype bool
+    """
+
+    if not blacklist or not mount_point:
+      return False
+
+    mount_point_elements = mount_point.split(cls.LINUX_PATH_SEP)
+
+    for el in blacklist:
+      el_list = el.split(cls.LINUX_PATH_SEP)
+      # compare the path elements one by one
+      if el_list == mount_point_elements[:len(el_list)]:
+        return True
+
+    return False
+
+
+  @classmethod
   @OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
   def osdisks(cls, config=None):
     """ Run df to find out the disks on the host. Only works on linux
@@ -95,6 +127,11 @@ class Hardware:
     and any mounts with spaces. """
     timeout = cls._get_mount_check_timeout(config)
     command = ["timeout", timeout, "df", "-kPT"]
+    blacklisted_mount_points = []
+
+    if config:
+      ignore_mount_value = config.get("agent", "ignore_mount_points", default="")
+      blacklisted_mount_points = [item.strip() for item in ignore_mount_value.split(",")]
 
     if not cls._check_remote_mounts(config):
       command.append("-l")
@@ -103,6 +140,7 @@ class Hardware:
     dfdata = df.communicate()[0]
     mounts = [cls._parse_df_line(line) for line in dfdata.splitlines() if line]
     result_mounts = []
+    ignored_mounts = []
 
     for mount in mounts:
       if not mount:
@@ -113,13 +151,21 @@ class Hardware:
        - mounted device is not in the ignored list
        - is accessible to user under which current process running
        - it is not file-mount (docker environment)
+       - mount path or a part of mount path is not in the blacklist
       """
-      if mount["device"] not in cls.IGNORE_DEVICES and \
+      if mount["device"] not in cls.IGNORE_DEVICES and\
          mount["mountpoint"].split("/")[0] not in cls.IGNORE_ROOT_MOUNTS and\
-         cls._chk_writable_mount(mount['mountpoint']) and \
-         not path_isfile(mount["mountpoint"]):
+         cls._chk_writable_mount(mount['mountpoint']) and\
+         not path_isfile(mount["mountpoint"]) and\
+         not cls._is_mount_blacklisted(blacklisted_mount_points, mount["mountpoint"]):
 
         result_mounts.append(mount)
+      else:
+        ignored_mounts.append(mount)
+
+    if len(ignored_mounts) > 0:
+      ignore_list = [el["mountpoint"] for el in ignored_mounts]
+      logger.info("Some mount points were ignored: {0}".format(', '.join(ignore_list)))
 
     return result_mounts
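
For reference, the element-wise prefix matching that _is_mount_blacklisted introduces can be exercised on its own; a standalone sketch with the names simplified from the patch above:

  LINUX_PATH_SEP = "/"

  def is_mount_blacklisted(blacklist, mount_point):
      # A mount is blacklisted when some blacklist entry is a path prefix of it,
      # compared element by element.
      if not blacklist or not mount_point:
          return False
      parts = mount_point.split(LINUX_PATH_SEP)
      for entry in blacklist:
          entry_parts = entry.split(LINUX_PATH_SEP)
          if entry_parts == parts[:len(entry_parts)]:
              return True
      return False

  assert is_mount_blacklisted(["/mnt/my_mount"], "/mnt/my_mount/sub_mount")
  assert not is_mount_blacklisted(["/mnt/my_mount"], "/mnt/my_mount2")
  assert not is_mount_blacklisted(["/mnt/my_mount"], "/")

Splitting on "/" before comparing is what keeps /mnt/my_mount from matching /mnt/my_mount2, which a naive startswith check would get wrong.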
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/51ec956b/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py b/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
index 5323d9a..563d250 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestCustomServiceOrchestrator.py
@@ -68,7 +68,7 @@ class TestCustomServiceOrchestrator(TestCase):
   def test_add_reg_listener_to_controller(self, FileCache_mock):
     FileCache_mock.return_value = None
     dummy_controller = MagicMock()
-    config = AmbariConfig().getConfig()
+    config = AmbariConfig()
     tempdir = tempfile.gettempdir()
     config.set('agent', 'prefix', tempdir)
     CustomServiceOrchestrator(config, dummy_controller)
@@ -204,7 +204,7 @@ class TestCustomServiceOrchestrator(TestCase):
   def test_resolve_script_path(self, FileCache_mock, exists_mock):
     FileCache_mock.return_value = None
     dummy_controller = MagicMock()
-    config = AmbariConfig().getConfig()
+    config = AmbariConfig()
     orchestrator = CustomServiceOrchestrator(config, dummy_controller)
     # Testing existing path
     exists_mock.return_value = True

http://git-wip-us.apache.org/repos/asf/ambari/blob/51ec956b/ambari-agent/src/test/python/ambari_agent/TestHardware.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestHardware.py b/ambari-agent/src/test/python/ambari_agent/TestHardware.py
index 038b2f8..ff3b40b 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestHardware.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestHardware.py
@@ -25,6 +25,7 @@ from mock.mock import patch, MagicMock, Mock
 import unittest
 import platform
 import socket
+import os
 from only_for_platform import not_for_platform, PLATFORM_WINDOWS
 from ambari_agent import hostname
 from ambari_agent.Hardware import Hardware
@@ -373,6 +374,75 @@ SwapFree:        1598676 kB
     self.assertEquals(2, json_mock.call_count)
     self.assertEquals('value', result['key'])
 
+  @patch.object(Hardware, "_chk_writable_mount")
+  @patch("ambari_agent.Hardware.path_isfile")
+  def test_osdisks_blacklist(self, isfile_mock, chk_writable_mount_mock):
+    df_output = \
+      """Filesystem                                                                                        Type  1024-blocks     Used Available Capacity Mounted on
+      /dev/mapper/docker-253:0-4980899-d45c264d37ab18c8ed14f890f4d59ac2b81e1c52919eb36a79419787209515f3 xfs      31447040  1282384  30164656       5% /
+      tmpfs                                                                                             tmpfs    32938336        4  32938332       1% /dev
+      tmpfs                                                                                             tmpfs    32938336        0  32938336       0% /sys/fs/cgroup
+      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /etc/resolv.conf
+      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /etc/hostname
+      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /etc/hosts
+      shm                                                                                               tmpfs       65536        0     65536       0% /dev/shm
+      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /run/secrets
+      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /mnt/blacklisted_mount
+      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /mnt/blacklisted_mount/sub-dir
+      """
+
+    def isfile_side_effect(path):
+      assume_files = ["/etc/resolv.conf", "/etc/hostname", "/etc/hosts"]
+      return path in assume_files
+
+    def chk_writable_mount_side_effect(path):
+      assume_read_only = ["/run/secrets"]
+      return path not in assume_read_only
+
+    isfile_mock.side_effect = isfile_side_effect
+    chk_writable_mount_mock.side_effect = chk_writable_mount_side_effect
+
+    config_dict = {
+      "agent": {
+        "ignore_mount_points": "/mnt/blacklisted_mount"
+      }
+    }
+
+    with patch("subprocess.Popen") as open_mock:
+      proc_mock = Mock()
+      attr = {
+        'communicate.return_value': [
+          df_output
+        ]
+      }
+      proc_mock.configure_mock(**attr)
+      open_mock.return_value = proc_mock
+
+      def conf_get(section, key, default=""):
+        if section in config_dict and key in config_dict[section]:
+          return config_dict[section][key]
+
+        return default
+
+      def has_option(section, key):
+        return section in config_dict and key in config_dict[section]
+
+      conf = Mock()
+      attr = {
+        'get.side_effect': conf_get,
+        'has_option.side_effect': has_option
+      }
+      conf.configure_mock(**attr)
+
+      result = Hardware.osdisks(conf)
+
+    self.assertEquals(1, len(result))
+
+    expected_mounts_left = ["/"]
+    mounts_left = [item["mountpoint"] for item in result]
+
+    self.assertEquals(expected_mounts_left, mounts_left)
+
 
 if __name__ == "__main__":
   unittest.main()


[06/41] ambari git commit: AMBARI-18965 : Investigate and reduce AMS HA dependency on Zookeeper. (avijayan)

Posted by jo...@apache.org.
AMBARI-18965 : Investigate and reduce AMS HA dependency on Zookeeper. (avijayan)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e88f8464
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e88f8464
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e88f8464

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: e88f84646d587549d00ef83496f4e9b61572d2cf
Parents: c711850
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Mon Dec 5 14:30:53 2016 -0800
Committer: Aravindan Vijayan <av...@hortonworks.com>
Committed: Mon Dec 5 14:30:53 2016 -0800

----------------------------------------------------------------------
 .../timeline/AbstractTimelineMetricsSink.java   | 40 ++++++++++++++++----
 .../availability/MetricCollectorHAHelper.java   |  1 -
 .../availability/MetricCollectorHATest.java     |  1 -
 .../src/main/python/core/config_reader.py       |  2 +-
 .../timeline/HBaseTimelineMetricStore.java      | 18 ++++-----
 .../timeline/TimelineMetricConfiguration.java   | 14 +++++++
 .../aggregators/AbstractTimelineAggregator.java |  6 +--
 .../MetricCollectorHAController.java            |  4 +-
 .../TestApplicationHistoryServer.java           | 14 ++++++-
 .../MetricCollectorHAControllerTest.java        |  4 +-
 .../0.1.0/configuration/ams-env.xml             |  2 +-
 .../0.1.0/configuration/ams-site.xml            | 12 ++++++
 .../0.1.0/package/scripts/params.py             | 13 ++++---
 13 files changed, 96 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e88f8464/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
index 2c0c822..f92f968 100644
--- a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/AbstractTimelineMetricsSink.java
@@ -84,6 +84,10 @@ public abstract class AbstractTimelineMetricsSink {
   public int ZK_CONNECT_TRY_COUNT = 10;
   public int ZK_SLEEP_BETWEEN_RETRY_TIME = 2000;
   public boolean shardExpired = true;
+  private int zookeeperMinBackoffTimeMins = 2;
+  private int zookeeperMaxBackoffTimeMins = 5;
+  private long zookeeperBackoffTimeMillis;
+  private long lastFailedZkRequestTime = 0L;
 
   private SSLSocketFactory sslSocketFactory;
 
@@ -132,6 +136,7 @@ public abstract class AbstractTimelineMetricsSink {
     metricSinkWriteShardStrategy = new MetricSinkWriteShardHostnameHashingStrategy(getHostname());
     collectorHAHelper = new MetricCollectorHAHelper(getZookeeperQuorum(),
       ZK_CONNECT_TRY_COUNT, ZK_SLEEP_BETWEEN_RETRY_TIME);
+    zookeeperBackoffTimeMillis = getZookeeperBackoffTimeMillis();
     isInitializedForHA = true;
   }
 
@@ -337,13 +342,27 @@ public abstract class AbstractTimelineMetricsSink {
     }
 
     // Reach out to all configured collectors before Zookeeper
-    refreshCollectorsFromConfigured();
+    Collection<String> collectorHosts = getConfiguredCollectorHosts();
+    refreshCollectorsFromConfigured(collectorHosts);
 
     // Lookup Zookeeper for live hosts - max 10 seconds wait time
-    if (allKnownLiveCollectors.size() == 0 && getZookeeperQuorum() != null) {
-      //TODO : Bring back Zk fallback after proper curation.
-      LOG.info("No live collectors from configuration. Not requesting zookeeper...");
-      //allKnownLiveCollectors.addAll(collectorHAHelper.findLiveCollectorHostsFromZNode());
+    long currentTime = System.currentTimeMillis();
+    if (allKnownLiveCollectors.size() == 0 && getZookeeperQuorum() != null
+      && (currentTime - lastFailedZkRequestTime) > zookeeperBackoffTimeMillis) {
+
+      LOG.info("No live collectors from configuration. Requesting zookeeper...");
+      allKnownLiveCollectors.addAll(collectorHAHelper.findLiveCollectorHostsFromZNode());
+      boolean noNewCollectorFromZk = true;
+      for (String collectorHostFromZk : allKnownLiveCollectors) {
+        if (!collectorHosts.contains(collectorHostFromZk)) {
+          noNewCollectorFromZk = false;
+          break;
+        }
+      }
+      if (noNewCollectorFromZk) {
+        LOG.info("No new collector was found from Zookeeper. Will not request zookeeper for " + zookeeperBackoffTimeMillis + " millis");
+        lastFailedZkRequestTime = System.currentTimeMillis();
+      }
     }
 
     if (allKnownLiveCollectors.size() != 0) {
@@ -356,7 +375,7 @@ public abstract class AbstractTimelineMetricsSink {
             // OR
             // through Expiry (Refresh needed to pick up dead collectors that might have not become alive).
             if (shardExpired) {
-              refreshCollectorsFromConfigured();
+              refreshCollectorsFromConfigured(getConfiguredCollectorHosts());
             }
             return metricSinkWriteShardStrategy.findCollectorShard(new ArrayList<>(allKnownLiveCollectors));
           }
@@ -376,8 +395,7 @@ public abstract class AbstractTimelineMetricsSink {
     return null;
   }
 
-  private void refreshCollectorsFromConfigured() {
-    Collection<String> collectorHosts = getConfiguredCollectorHosts();
+  private void refreshCollectorsFromConfigured(Collection<String> collectorHosts) {
 
     LOG.debug("Trying to find live collector host from : " + collectorHosts);
     if (collectorHosts != null && !collectorHosts.isEmpty()) {
@@ -497,6 +515,12 @@ public abstract class AbstractTimelineMetricsSink {
     return hosts;
   }
 
+
+  private long getZookeeperBackoffTimeMillis() {
+    return (zookeeperMinBackoffTimeMins +
+      rand.nextInt(zookeeperMaxBackoffTimeMins - zookeeperMinBackoffTimeMins + 1)) * 60*1000l;
+  }
+
   /**
    * Get a pre-formatted URI for the collector
    */
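
The new backoff fields above throttle Zookeeper lookups: after a lookup that yields no new collectors, the sink waits a randomized two-to-five-minute window before asking Zookeeper again. A Python transcription of getZookeeperBackoffTimeMillis, mirroring the Java arithmetic:

  import random

  ZK_MIN_BACKOFF_MINS = 2  # zookeeperMinBackoffTimeMins
  ZK_MAX_BACKOFF_MINS = 5  # zookeeperMaxBackoffTimeMins

  def zookeeper_backoff_millis():
      # (min + rand.nextInt(max - min + 1)) * 60 * 1000 in the Java patch:
      # a uniformly random whole number of minutes in [min, max], in millis.
      return random.randint(ZK_MIN_BACKOFF_MINS, ZK_MAX_BACKOFF_MINS) * 60 * 1000

  assert 120000 <= zookeeper_backoff_millis() <= 300000

Randomizing the window keeps a fleet of sinks from hammering Zookeeper in lockstep after a collector outage.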

http://git-wip-us.apache.org/repos/asf/ambari/blob/e88f8464/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHAHelper.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHAHelper.java b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHAHelper.java
index 54c2bdf..c6f6beb 100644
--- a/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHAHelper.java
+++ b/ambari-metrics/ambari-metrics-common/src/main/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHAHelper.java
@@ -23,7 +23,6 @@ import org.apache.curator.CuratorZookeeperClient;
 import org.apache.curator.RetryLoop;
 import org.apache.curator.RetryPolicy;
 import org.apache.curator.retry.BoundedExponentialBackoffRetry;
-import org.apache.curator.retry.RetryUntilElapsed;
 import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.data.Stat;
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e88f8464/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java
index a0bc2e8..a393a96 100644
--- a/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java
+++ b/ambari-metrics/ambari-metrics-common/src/test/java/org/apache/hadoop/metrics2/sink/timeline/availability/MetricCollectorHATest.java
@@ -49,7 +49,6 @@ import static org.powermock.api.easymock.PowerMock.verifyAll;
 @PrepareForTest({AbstractTimelineMetricsSink.class, URL.class, HttpURLConnection.class, MetricCollectorHAHelper.class})
 public class MetricCollectorHATest {
 
-  @Ignore
   @Test
   public void findCollectorUsingZKTest() throws Exception {
     InputStream is = createNiceMock(InputStream.class);

http://git-wip-us.apache.org/repos/asf/ambari/blob/e88f8464/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
index b84979a..7010187 100644
--- a/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
+++ b/ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/config_reader.py
@@ -223,7 +223,7 @@ class Configuration:
     return self.get("collector", "failover_strategy", ROUND_ROBIN_FAILOVER_STRATEGY)
 
   def get_failover_strategy_blacklisted_interval_seconds(self):
-    return self.get("collector", "failover_strategy_blacklisted_interval_seconds", 600)
+    return self.get("collector", "failover_strategy_blacklisted_interval_seconds", 300)
 
   def get_hostname_script(self):
     if self.hostname_script:

http://git-wip-us.apache.org/repos/asf/ambari/blob/e88f8464/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
index a6f8e2f..5dc3aee 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
@@ -106,16 +106,14 @@ public class HBaseTimelineMetricStore extends AbstractService implements Timelin
       // Initialize policies before TTL update
       hBaseAccessor.initPoliciesAndTTL();
       // Start HA service
-      if (configuration.isDistributedOperationModeEnabled()) {
-        // Start the controller
-        haController = new MetricCollectorHAController(configuration);
-        try {
-          haController.initializeHAController();
-        } catch (Exception e) {
-          LOG.error(e);
-          throw new MetricsSystemInitializationException("Unable to " +
-            "initialize HA controller", e);
-        }
+      // Start the controller
+      haController = new MetricCollectorHAController(configuration);
+      try {
+        haController.initializeHAController();
+      } catch (Exception e) {
+        LOG.error(e);
+        throw new MetricsSystemInitializationException("Unable to " +
+          "initialize HA controller", e);
       }
 
       String whitelistFile = metricsConf.get(TIMELINE_METRICS_WHITELIST_FILE, "");

http://git-wip-us.apache.org/repos/asf/ambari/blob/e88f8464/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
index 22710b0..13d361b 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
@@ -323,6 +323,20 @@ public class TimelineMetricConfiguration {
     return hbaseConf.getTrimmed("hbase.zookeeper.quorum");
   }
 
+  public String getClusterZKClientPort() throws MalformedURLException, URISyntaxException {
+    if (!isInitialized) {
+      initialize();
+    }
+    return metricsConf.getTrimmed("cluster.zookeeper.property.clientPort", "2181");
+  }
+
+  public String getClusterZKQuorum() throws MalformedURLException, URISyntaxException {
+    if (!isInitialized) {
+      initialize();
+    }
+    return metricsConf.getTrimmed("cluster.zookeeper.quorum");
+  }
+
   public String getInstanceHostnameFromEnv() throws UnknownHostException {
     String amsInstanceName = System.getProperty("AMS_INSTANCE_NAME");
     if (amsInstanceName == null) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e88f8464/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java
index 1096bc4..cb131d3 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregator.java
@@ -317,9 +317,9 @@ public abstract class AbstractTimelineAggregator implements TimelineMetricAggreg
 
   protected void downsample(Connection conn, Long startTime, Long endTime) {
 
-    LOG.info("Checking for downsampling requests.");
+    LOG.debug("Checking for downsampling requests.");
     if (CollectionUtils.isEmpty(configuredDownSamplers)) {
-      LOG.info("No downsamplers configured");
+      LOG.debug("No downsamplers configured");
       return;
     }
 
@@ -406,7 +406,7 @@ public abstract class AbstractTimelineAggregator implements TimelineMetricAggreg
 
     PreparedStatement stmt = null;
     ResultSet rs = null;
-    LOG.info("Downsampling query : " + condition.getStatement());
+    LOG.debug("Downsampling query : " + condition.getStatement());
 
     try {
       stmt = PhoenixTransactSQL.prepareGetMetricsSqlStmt(conn, condition);

http://git-wip-us.apache.org/repos/asf/ambari/blob/e88f8464/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAController.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAController.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAController.java
index edce367..26702b6 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAController.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAController.java
@@ -81,8 +81,8 @@ public class MetricCollectorHAController {
     }
 
     try {
-      String zkClientPort = configuration.getZKClientPort();
-      String zkQuorum = configuration.getZKQuorum();
+      String zkClientPort = configuration.getClusterZKClientPort();
+      String zkQuorum = configuration.getClusterZKQuorum();
 
       if (StringUtils.isEmpty(zkClientPort) || StringUtils.isEmpty(zkQuorum)) {
         throw new Exception("Unable to parse zookeeper quorum. clientPort = "

http://git-wip-us.apache.org/repos/asf/ambari/blob/e88f8464/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
index 462213d..3688630 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
@@ -25,8 +25,10 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.HBaseTimelineMetricStore;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.PhoenixHBaseAccessor;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricConfiguration;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.availability.MetricCollectorHAController;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.query.DefaultPhoenixDataSource;
 import org.apache.zookeeper.ClientCnxn;
 import org.easymock.EasyMock;
@@ -71,7 +73,7 @@ import static org.powermock.api.support.membermodification.MemberMatcher.method;
 import static org.powermock.api.support.membermodification.MemberModifier.suppress;
 
 @RunWith(PowerMockRunner.class)
-@PrepareForTest({ PhoenixHBaseAccessor.class, UserGroupInformation.class,
+@PrepareForTest({ PhoenixHBaseAccessor.class, HBaseTimelineMetricStore.class, UserGroupInformation.class,
   ClientCnxn.class, DefaultPhoenixDataSource.class, ConnectionFactory.class,
   TimelineMetricConfiguration.class, ApplicationHistoryServer.class })
 @PowerMockIgnore( {"javax.management.*"})
@@ -179,6 +181,8 @@ public class TestApplicationHistoryServer {
     expect(metricConfiguration.getTimelineMetricsServiceHandlerThreadCount()).andReturn(20).anyTimes();
     expect(metricConfiguration.getWebappAddress()).andReturn("localhost:9990").anyTimes();
     expect(metricConfiguration.getTimelineServiceRpcAddress()).andReturn("localhost:10299").anyTimes();
+    expect(metricConfiguration.getClusterZKQuorum()).andReturn("localhost").anyTimes();
+    expect(metricConfiguration.getClusterZKClientPort()).andReturn("2181").anyTimes();
 
     Connection connection = createNiceMock(Connection.class);
     Statement stmt = createNiceMock(Statement.class);
@@ -197,6 +201,14 @@ public class TestApplicationHistoryServer {
     connection.close();
     expectLastCall();
 
+    MetricCollectorHAController haControllerMock = PowerMock.createMock(MetricCollectorHAController.class);
+    expectNew(MetricCollectorHAController.class, metricConfiguration)
+      .andReturn(haControllerMock);
+
+    haControllerMock.initializeHAController();
+    expectLastCall().once();
+    expect(haControllerMock.isInitialized()).andReturn(false).anyTimes();
+
     org.apache.hadoop.hbase.client.Connection conn = createNiceMock(org.apache.hadoop.hbase.client.Connection.class);
     mockStatic(ConnectionFactory.class);
     expect(ConnectionFactory.createConnection((Configuration) anyObject())).andReturn(conn);

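PowerMock can only intercept constructor calls in classes it has instrumented, which is presumably why HBaseTimelineMetricStore.class joins @PrepareForTest above: expectNew() takes effect only where the `new MetricCollectorHAController(...)` call actually happens. The general shape of the pattern, as a sketch under that assumption:

    // The class that performs the `new` call must be listed in
    // @PrepareForTest, otherwise expectNew() is silently ignored.
    MetricCollectorHAController haController =
        PowerMock.createMock(MetricCollectorHAController.class);
    PowerMock.expectNew(MetricCollectorHAController.class, metricConfiguration)
        .andReturn(haController);
    PowerMock.replayAll();
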
http://git-wip-us.apache.org/repos/asf/ambari/blob/e88f8464/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAControllerTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAControllerTest.java
index 1e4bac0..875f013 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAControllerTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/availability/MetricCollectorHAControllerTest.java
@@ -51,8 +51,8 @@ public class MetricCollectorHAControllerTest extends AbstractMiniHBaseClusterTes
     String port = zkUrl.split(":")[3];
     String quorum = zkUrl.split(":")[2];
 
-    expect(configuration.getZKClientPort()).andReturn(port);
-    expect(configuration.getZKQuorum()).andReturn(quorum);
+    expect(configuration.getClusterZKClientPort()).andReturn(port);
+    expect(configuration.getClusterZKQuorum()).andReturn(quorum);
 
     replay(configuration);
   }

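The index arithmetic assumes the mini-cluster hands back a Phoenix JDBC URL. For an illustrative value (not taken from this commit):

    // "jdbc:phoenix:localhost:61181".split(":")
    //   -> ["jdbc", "phoenix", "localhost", "61181"]
    String zkUrl = "jdbc:phoenix:localhost:61181";  // assumed URL shape
    String quorum = zkUrl.split(":")[2];            // "localhost"
    String port   = zkUrl.split(":")[3];            // "61181"
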
http://git-wip-us.apache.org/repos/asf/ambari/blob/e88f8464/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
index 48232af..a33656e 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-env.xml
@@ -82,7 +82,7 @@
   </property>
   <property>
     <name>failover_strategy_blacklisted_interval</name>
-    <value>600</value>
+    <value>300</value>
     <description>
      The metrics collector host will be blacklisted for the specified number of seconds if the metric monitor fails to connect to it.
     </description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e88f8464/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
index 9235777..45fe1a7 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-site.xml
@@ -783,4 +783,16 @@
     </description>
     <on-ambari-upgrade add="true"/>
   </property>
+  <property>
+    <name>cluster.zookeeper.quorum</name>
+    <value>{{cluster_zookeeper_quorum_hosts}}</value>
+    <description>Comma-separated list of servers in the cluster ZooKeeper quorum.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>cluster.zookeeper.property.clientPort</name>
+    <value>{{cluster_zookeeper_clientPort}}</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e88f8464/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
index 529cf4c..5ab6a4c 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
@@ -240,15 +240,18 @@ else:
 max_open_files_limit = default("/configurations/ams-hbase-env/max_open_files_limit", "32768")
 hostname = config["hostname"]
 
+cluster_zookeeper_quorum_hosts = ",".join(config['clusterHostInfo']['zookeeper_hosts'])
+if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
+  cluster_zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
+else:
+  cluster_zookeeper_clientPort = '2181'
+
 if not is_hbase_distributed:
   zookeeper_quorum_hosts = hostname
   zookeeper_clientPort = '61181'
 else:
-  zookeeper_quorum_hosts = ",".join(config['clusterHostInfo']['zookeeper_hosts'])
-  if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
-    zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
-  else:
-    zookeeper_clientPort = '2181'
+  zookeeper_quorum_hosts = cluster_zookeeper_quorum_hosts
+  zookeeper_clientPort = cluster_zookeeper_clientPort
 
 ams_checkpoint_dir = config['configurations']['ams-site']['timeline.metrics.aggregator.checkpoint.dir']
 _hbase_tmp_dir = config['configurations']['ams-hbase-site']['hbase.tmp.dir']


[19/41] ambari git commit: AMBARI-19092. HDP 3.0 support for ZK with metainfo (alejandro)

Posted by jo...@apache.org.
AMBARI-19092. HDP 3.0 support for ZK with metainfo (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/aed98397
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/aed98397
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/aed98397

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: aed983973034114931aca387f92bae5ca0d4e169
Parents: 757267f
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Fri Dec 2 13:26:49 2016 -0800
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Tue Dec 6 16:52:32 2016 -0800

----------------------------------------------------------------------
 .../HDP/3.0/services/ZOOKEEPER/metainfo.xml     | 54 ++++++++++++++++++++
 1 file changed, 54 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/aed98397/ambari-server/src/main/resources/stacks/HDP/3.0/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/ZOOKEEPER/metainfo.xml
new file mode 100644
index 0000000..5d4fb4d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,54 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <displayName>ZooKeeper</displayName>
+      <comment>Centralized service which provides highly reliable distributed coordination</comment>
+      <version>3.4.6.3.0</version>
+      <extends>common-services/ZOOKEEPER/3.4.6</extends>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>amazon2015,redhat6,redhat7,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>zookeeper_${stack_version}</name>
+            </package>
+            <package>
+              <name>zookeeper_${stack_version}-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>zookeeper-${stack_version}</name>
+            </package>
+            <package>
+              <name>zookeeper-${stack_version}-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
+</metainfo>


[41/41] ambari git commit: Merge branch 'trunk' into branch-feature-AMBARI-18456

Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/88c28925
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/88c28925
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/88c28925

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 88c2892527b0a6ee97a3e237ebc76a896ce865e9
Parents: 803f44b d1293e0
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Dec 7 16:16:53 2016 -0500
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Dec 7 16:16:53 2016 -0500

----------------------------------------------------------------------
 ambari-agent/conf/unix/ambari-agent.ini         |   1 +
 .../src/main/python/ambari_agent/Hardware.py    |  52 +-
 .../src/main/python/ambari_agent/HostInfo.py    |  12 +-
 .../TestCustomServiceOrchestrator.py            |   4 +-
 .../test/python/ambari_agent/TestHardware.py    |  70 ++
 .../ambari_commons/ambari_metrics_helper.py     |  26 +-
 .../main/python/ambari_commons/logging_utils.py |   5 +-
 .../libraries/functions/copy_tarball.py         |   2 +-
 .../dynamic_variable_interpretation.py          |   2 +-
 .../timeline/AbstractTimelineMetricsSink.java   |  40 +-
 .../availability/MetricCollectorHAHelper.java   |   1 -
 .../availability/MetricCollectorHATest.java     |   1 -
 .../src/main/python/core/config_reader.py       |   2 +-
 .../timeline/HBaseTimelineMetricStore.java      |  18 +-
 .../timeline/TimelineMetricConfiguration.java   |  14 +
 .../aggregators/AbstractTimelineAggregator.java |   6 +-
 .../MetricCollectorHAController.java            |   4 +-
 .../TestApplicationHistoryServer.java           |  14 +-
 .../MetricCollectorHAControllerTest.java        |   4 +-
 ambari-project/pom.xml                          |  41 +
 ambari-server/checkstyle.xml                    |  17 +
 ambari-server/pom.xml                           |   4 +
 .../ambari/server/agent/AgentRequests.java      |   2 +-
 .../ambari/server/agent/HeartbeatProcessor.java |  15 +-
 .../ambari/server/checks/CheckDescription.java  |   4 +-
 .../internal/AlertTargetResourceProvider.java   |   2 +-
 .../ClusterStackVersionResourceProvider.java    |   4 +-
 .../internal/UpgradeResourceProvider.java       |   2 +-
 .../logging/LogSearchDataRetrievalService.java  |   7 +-
 .../apache/ambari/server/orm/dao/AlertsDAO.java |   6 +-
 .../ambari/server/orm/entities/GroupEntity.java |   5 +-
 .../server/security/authorization/Users.java    |  10 +-
 .../apache/ambari/server/state/ServiceImpl.java |   2 +-
 .../server/state/cluster/ClustersImpl.java      |   2 +-
 .../ambari/server/state/host/HostImpl.java      |   2 +-
 .../services/RetryUpgradeActionService.java     |   2 +-
 .../ambari/server/state/stack/UpgradePack.java  |  19 +-
 .../svccomphost/ServiceComponentHostImpl.java   |   2 +-
 .../server/upgrade/AbstractUpgradeCatalog.java  |  25 +-
 .../server/upgrade/UpgradeCatalog240.java       |   4 +-
 .../main/python/ambari_server/serverUpgrade.py  |  38 +-
 .../AMBARI_INFRA/0.1.0/metainfo.xml             |   1 +
 .../0.1.0/package/scripts/setup_infra_solr.py   |   3 +-
 .../0.1.0/configuration/ams-env.xml             |   2 +-
 .../0.1.0/configuration/ams-site.xml            |  12 +
 .../AMBARI_METRICS/0.1.0/package/scripts/ams.py |   1 +
 .../0.1.0/package/scripts/metrics_collector.py  |   2 +
 .../package/scripts/metrics_grafana_util.py     |   7 +-
 .../0.1.0/package/scripts/params.py             |  22 +-
 .../0.1.0/package/scripts/service_check.py      |   2 +-
 .../LOGSEARCH/0.5.0/metainfo.xml                |   1 +
 .../0.5.0/package/scripts/setup_logsearch.py    |   3 +-
 .../stacks/HDP/2.3/services/stack_advisor.py    |   4 +-
 .../HDP/3.0/configuration/cluster-env.xml       | 293 +++++++
 .../HDP/3.0/hooks/after-INSTALL/scripts/hook.py |  37 +
 .../3.0/hooks/after-INSTALL/scripts/params.py   |  97 +++
 .../scripts/shared_initialization.py            | 111 +++
 .../hooks/before-ANY/files/changeToSecureUid.sh |  53 ++
 .../HDP/3.0/hooks/before-ANY/scripts/hook.py    |  36 +
 .../HDP/3.0/hooks/before-ANY/scripts/params.py  | 231 ++++++
 .../before-ANY/scripts/shared_initialization.py | 226 +++++
 .../3.0/hooks/before-INSTALL/scripts/hook.py    |  37 +
 .../3.0/hooks/before-INSTALL/scripts/params.py  | 113 +++
 .../scripts/repo_initialization.py              |  68 ++
 .../scripts/shared_initialization.py            |  37 +
 .../3.0/hooks/before-RESTART/scripts/hook.py    |  29 +
 .../hooks/before-START/files/checkForFormat.sh  |  65 ++
 .../before-START/files/fast-hdfs-resource.jar   | Bin 0 -> 19285850 bytes
 .../before-START/files/task-log4j.properties    | 134 +++
 .../hooks/before-START/files/topology_script.py |  66 ++
 .../HDP/3.0/hooks/before-START/scripts/hook.py  |  39 +
 .../3.0/hooks/before-START/scripts/params.py    | 326 ++++++++
 .../before-START/scripts/rack_awareness.py      |  47 ++
 .../scripts/shared_initialization.py            | 191 +++++
 .../templates/commons-logging.properties.j2     |  43 +
 .../templates/exclude_hosts_list.j2             |  21 +
 .../templates/hadoop-metrics2.properties.j2     | 105 +++
 .../before-START/templates/health_check.j2      |  81 ++
 .../templates/include_hosts_list.j2             |  21 +
 .../templates/topology_mappings.data.j2         |  24 +
 .../main/resources/stacks/HDP/3.0/kerberos.json |  78 ++
 .../main/resources/stacks/HDP/3.0/metainfo.xml  |  24 +
 .../HDP/3.0/properties/stack_features.json      | 323 ++++++++
 .../stacks/HDP/3.0/properties/stack_tools.json  |   4 +
 .../resources/stacks/HDP/3.0/repos/repoinfo.xml | 132 +++
 .../services/HDFS/configuration/core-site.xml   |  56 ++
 .../services/HDFS/configuration/hadoop-env.xml  | 200 +++++
 .../services/HDFS/configuration/hdfs-log4j.xml  | 226 +++++
 .../services/HDFS/configuration/hdfs-site.xml   | 153 ++++
 .../HDFS/configuration/ranger-hdfs-audit.xml    | 217 +++++
 .../ranger-hdfs-plugin-properties.xml           |  98 +++
 .../configuration/ranger-hdfs-policymgr-ssl.xml |  67 ++
 .../HDFS/configuration/ranger-hdfs-security.xml |  65 ++
 .../services/HDFS/configuration/widgets.json    | 649 +++++++++++++++
 .../stacks/HDP/3.0/services/HDFS/kerberos.json  | 246 ++++++
 .../stacks/HDP/3.0/services/HDFS/metainfo.xml   | 190 +++++
 .../services/HDFS/quicklinks/quicklinks.json    |  80 ++
 .../HDP/3.0/services/HDFS/themes/theme.json     | 179 ++++
 .../HDP/3.0/services/YARN/YARN_widgets.json     | 670 +++++++++++++++
 .../YARN/configuration-mapred/mapred-env.xml    |  51 ++
 .../YARN/configuration-mapred/mapred-site.xml   | 134 +++
 .../YARN/configuration/capacity-scheduler.xml   |  71 ++
 .../YARN/configuration/ranger-yarn-audit.xml    | 177 ++++
 .../ranger-yarn-plugin-properties.xml           |  82 ++
 .../configuration/ranger-yarn-policymgr-ssl.xml |  66 ++
 .../YARN/configuration/ranger-yarn-security.xml |  58 ++
 .../services/YARN/configuration/yarn-env.xml    | 200 +++++
 .../services/YARN/configuration/yarn-log4j.xml  | 103 +++
 .../services/YARN/configuration/yarn-site.xml   | 814 +++++++++++++++++++
 .../stacks/HDP/3.0/services/YARN/kerberos.json  | 278 +++++++
 .../stacks/HDP/3.0/services/YARN/metainfo.xml   | 173 ++++
 .../YARN/quicklinks-mapred/quicklinks.json      |  80 ++
 .../services/YARN/quicklinks/quicklinks.json    |  80 ++
 .../3.0/services/YARN/themes-mapred/theme.json  | 132 +++
 .../HDP/3.0/services/YARN/themes/theme.json     | 250 ++++++
 .../HDP/3.0/services/ZOOKEEPER/metainfo.xml     |  54 ++
 .../main/resources/stacks/HDP/3.0/widgets.json  |  95 +++
 .../2.1/hooks/before-START/scripts/params.py    |   2 +-
 .../LogSearchDataRetrievalServiceTest.java      | 142 ++--
 .../ldap/AmbariLdapDataPopulatorTest.java       |   3 +-
 .../ambari/server/state/UpgradeHelperTest.java  |  39 +-
 .../server/upgrade/UpgradeCatalog210Test.java   |   2 +
 .../stacks/2.3/common/test_stack_advisor.py     |  29 +-
 .../stacks/2.4/AMBARI_INFRA/test_infra_solr.py  |   2 +-
 .../stacks/2.4/LOGSEARCH/test_logsearch.py      |   2 +-
 .../app/controllers/global/update_controller.js |   5 +-
 ambari-web/app/controllers/installer.js         |   3 +-
 .../journalNode/step4_controller.js             |   6 +-
 .../journalNode/wizard_controller.js            |  28 +-
 .../main/admin/stack_and_upgrade_controller.js  |  13 +-
 .../app/controllers/wizard/step1_controller.js  |  21 +-
 ambari-web/app/messages.js                      |   2 -
 .../common/widgets/export_metrics_mixin.js      |  28 +-
 .../main/host/details/actions/check_host.js     |   3 +-
 .../mixins/wizard/assign_master_components.js   |   1 +
 .../app/routes/manage_journalnode_routes.js     |   4 +-
 .../app/styles/theme/bootstrap-ambari.css       |  29 +-
 ambari-web/app/styles/wizard.less               |  13 +-
 .../templates/common/host_progress_popup.hbs    |   8 +-
 .../highAvailability/journalNode/wizard.hbs     |  13 +-
 .../admin/stack_upgrade/upgrade_options.hbs     |  58 +-
 .../app/templates/main/host/host_alerts.hbs     |   8 +-
 ambari-web/app/templates/wizard/step8.hbs       |   2 +-
 ambari-web/app/views/common/controls_view.js    |   3 +-
 .../highAvailability/journalNode/step2_view.js  |   5 +-
 .../admin/stack_upgrade/upgrade_wizard_view.js  |   6 +-
 .../app/views/main/host/host_alerts_view.js     |  41 +-
 ambari-web/app/views/main/host/menu.js          |   4 +-
 ambari-web/test/controllers/installer_test.js   |  13 +-
 .../test/views/common/controls_view_test.js     |  74 +-
 .../stack_upgrade/upgrade_wizard_view_test.js   |   2 +-
 .../views/main/host/host_alerts_view_test.js    |   3 +-
 .../ui/app/templates/components/job-details.hbs |   2 +-
 pom.xml                                         |   1 +
 utility/pom.xml                                 |  33 +-
 ...AvoidTransactionalOnPrivateMethodsCheck.java |  55 ++
 .../src/main/resources/checkstyle_packages.xml  |  15 +
 ...dTransactionalOnPrivateMethodsCheckTest.java |  49 ++
 .../InputTransactionalOnPrivateMethods.java     |  46 ++
 159 files changed, 10080 insertions(+), 377 deletions(-)
----------------------------------------------------------------------



[08/41] ambari git commit: AMBARI-19103. Support for multi-line tabs (onechiporenko)

Posted by jo...@apache.org.
AMBARI-19103. Support for multi-line tabs (onechiporenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/14c39691
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/14c39691
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/14c39691

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 14c39691e06bde1d158c75e65251dc1dc1ba20e9
Parents: 353b2ac
Author: Oleg Nechiporenko <on...@apache.org>
Authored: Tue Dec 6 12:14:55 2016 +0200
Committer: Oleg Nechiporenko <on...@apache.org>
Committed: Tue Dec 6 12:14:55 2016 +0200

----------------------------------------------------------------------
 ambari-web/app/styles/theme/bootstrap-ambari.css | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/14c39691/ambari-web/app/styles/theme/bootstrap-ambari.css
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/theme/bootstrap-ambari.css b/ambari-web/app/styles/theme/bootstrap-ambari.css
index d10a13b..738fa59 100644
--- a/ambari-web/app/styles/theme/bootstrap-ambari.css
+++ b/ambari-web/app/styles/theme/bootstrap-ambari.css
@@ -452,7 +452,9 @@ h2.table-title {
 .nav.nav-tabs li a:active,
 .nav.nav-tabs li a:focus {
   color: #333;
-  border-width: 0;
+  border-top-width: 0;
+  border-left-width: 0;
+  border-right-width: 0;
   background: none;
 }
 .nav.nav-tabs li a .badge.badge-important {


[02/41] ambari git commit: AMBARI-19039: In stack version, build number should not be mandatory (Amruta Borkar via dili)

Posted by jo...@apache.org.
AMBARI-19039: In stack version, build number should not be mandatory (Amruta Borkar via dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b5122e25
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b5122e25
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b5122e25

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: b5122e258bfe5a2e56511eb9dd29a70cf1260a9c
Parents: 50ead4d
Author: Di Li <di...@apache.org>
Authored: Mon Dec 5 16:03:50 2016 -0500
Committer: Di Li <di...@apache.org>
Committed: Mon Dec 5 16:03:50 2016 -0500

----------------------------------------------------------------------
 .../python/resource_management/libraries/functions/copy_tarball.py | 2 +-
 .../libraries/functions/dynamic_variable_interpretation.py         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b5122e25/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
index 519c88b..31a9be4 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
@@ -188,7 +188,7 @@ def _get_single_version_from_stack_select():
     Logger.error("Could not verify stack version by calling '{0}'. Return Code: {1}, Output: {2}.".format(get_stack_versions_cmd, str(code), str(out)))
     return None
 
-  matches = re.findall(r"([\d\.]+\-\d+)", out)
+  matches = re.findall(r"([\d\.]+(?:-\d+)?)", out)
 
   if matches and len(matches) == 1:
     stack_version = matches[0]

http://git-wip-us.apache.org/repos/asf/ambari/blob/b5122e25/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py b/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py
index ca8fe19..74db04a 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py
@@ -185,7 +185,7 @@ def copy_tarballs_to_hdfs(tarball_prefix, stack_select_component_name, component
                    (get_stack_version_cmd, str(code), str(out)))
     return 1
 
-  matches = re.findall(r"([\d\.]+\-\d+)", out)
+  matches = re.findall(r"([\d\.]+(?:-\d+)?)", out)
   stack_version = matches[0] if matches and len(matches) > 0 else None
 
   if not stack_version:

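Both hunks relax the same pattern: the trailing build number becomes optional, so a bare version such as "2.5.0.0" is accepted alongside "2.5.0.0-1234". A small Java sketch of the behavior change (the sample version strings are made up):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class StackVersionRegexDemo {
      public static void main(String[] args) {
        // Old: ([\d\.]+\-\d+)       -- build number required.
        // New: ([\d\.]+(?:-\d+)?)   -- build number optional.
        Pattern p = Pattern.compile("([\\d\\.]+(?:-\\d+)?)");
        for (String v : new String[] {"2.5.0.0-1234", "2.5.0.0"}) {
          Matcher m = p.matcher(v);
          System.out.println(v + " -> " + (m.find() ? m.group(1) : "no match"));
        }
      }
    }
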

[04/41] ambari git commit: AMBARI-19082. Ambari Server upgrade from 2.0.2 to 2.2.2 changed the following configs and added a port of -1. (vbrodetskyi)

Posted by jo...@apache.org.
AMBARI-19082. Ambari Server upgrade from 2.0.2 to 2.2.2 changed the following configs and added a port of -1. (vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/edf8ab24
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/edf8ab24
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/edf8ab24

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: edf8ab2432af6b73f73c4ae386ff13c93c6c38c2
Parents: a00ffff
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Tue Dec 6 00:06:31 2016 +0200
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Tue Dec 6 00:06:31 2016 +0200

----------------------------------------------------------------------
 .../server/upgrade/AbstractUpgradeCatalog.java  | 21 ++++++++++++--------
 .../server/upgrade/UpgradeCatalog210Test.java   |  2 ++
 2 files changed, 15 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/edf8ab24/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
index ce9fcc6..7f91a68 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
@@ -17,6 +17,11 @@
  */
 package org.apache.ambari.server.upgrade;
 
+import javax.persistence.EntityManager;
+import javax.xml.bind.JAXBException;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+
 import java.io.File;
 import java.io.FileReader;
 import java.io.FilenameFilter;
@@ -42,11 +47,6 @@ import java.util.TreeMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import javax.persistence.EntityManager;
-import javax.xml.bind.JAXBException;
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
@@ -447,11 +447,16 @@ public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
     Config hdfsSiteConfig = cluster.getDesiredConfigByType(CONFIGURATION_TYPE_HDFS_SITE);
     if (hdfsSiteConfig != null) {
       Map<String, String> properties = hdfsSiteConfig.getProperties();
+      if (properties.containsKey("dfs.internal.nameservices")) {
+        return true;
+      }
       String nameServices = properties.get(PROPERTY_DFS_NAMESERVICES);
       if (!StringUtils.isEmpty(nameServices)) {
-        String namenodes = properties.get(String.format("dfs.ha.namenodes.%s", nameServices));
-        if (!StringUtils.isEmpty(namenodes)) {
-          return (namenodes.split(",").length > 1);
+        for (String nameService : nameServices.split(",")) {
+          String namenodes = properties.get(String.format("dfs.ha.namenodes.%s", nameService));
+          if (!StringUtils.isEmpty(namenodes)) {
+            return (namenodes.split(",").length > 1);
+          }
         }
       }
     }

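The fix matters for federated clusters where only some nameservices run HA. A worked example with the values the test in the next hunk adds (illustrative only):

    import java.util.HashMap;
    import java.util.Map;

    Map<String, String> props = new HashMap<String, String>();
    props.put("dfs.nameservices", "nncl1,nncl2");    // two nameservices
    props.put("dfs.ha.namenodes.nncl2", "nn1,nn2");  // only nncl2 is HA
    // Old lookup: "dfs.ha.namenodes.nncl1,nncl2" -> null, HA never detected.
    // New loop:   "dfs.ha.namenodes.nncl1" -> null, keep going;
    //             "dfs.ha.namenodes.nncl2" -> "nn1,nn2" -> HA detected.
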
http://git-wip-us.apache.org/repos/asf/ambari/blob/edf8ab24/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
index 3bff060..0bd8f48 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
@@ -880,6 +880,8 @@ public class UpgradeCatalog210Test {
 
     final Map<String, String> propertiesExpectedHdfs = new HashMap<String, String>();
     final Map<String, String> propertiesExpectedCoreSite = new HashMap<String, String>();
+    propertiesExpectedHdfs.put("dfs.nameservices", "nncl1,nncl2");
+    propertiesExpectedHdfs.put("dfs.ha.namenodes.nncl2", "nn1,nn2");
     propertiesExpectedCoreSite.put("fs.defaultFS", "hdfs://EXAMPLE.COM:8020");
     final Injector mockInjector = Guice.createInjector(new AbstractModule() {
       @Override


[40/41] ambari git commit: AMBARI-19115. Metrics Monitor start failed: ERROR: Cannot write pid. (aonischuk via swagle)

Posted by jo...@apache.org.
AMBARI-19115. Metrics Monitor start failed: ERROR: Cannot write pid. (aonischuk via swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d1293e01
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d1293e01
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d1293e01

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: d1293e01841d0975d1a3d76dbf676ef27a4bace9
Parents: 51ec956
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Wed Dec 7 12:22:55 2016 -0800
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Wed Dec 7 12:22:55 2016 -0800

----------------------------------------------------------------------
 .../common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py     | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d1293e01/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
index 6cf743e..1795227 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
@@ -369,6 +369,7 @@ def ams(name=None, action=None):
     Directory(params.ams_monitor_pid_dir,
               owner=params.ams_user,
               group=params.user_group,
+              cd_access="a",
               mode=0755,
               create_parents = True
     )


[09/41] ambari git commit: AMBARI-19085. Host-Ordered upgrade should not show as an Upgrade option in the UI. (atkach)

Posted by jo...@apache.org.
AMBARI-19085. Host-Ordered upgrade should not show as an Upgrade option in the UI. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a2f7a030
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a2f7a030
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a2f7a030

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: a2f7a03088f553c51be40c16c1b56948a19a15b6
Parents: 14c3969
Author: Andrii Tkach <at...@apache.org>
Authored: Tue Dec 6 12:20:50 2016 +0200
Committer: Andrii Tkach <at...@apache.org>
Committed: Tue Dec 6 12:45:18 2016 +0200

----------------------------------------------------------------------
 .../main/admin/stack_and_upgrade_controller.js  | 13 ++++-
 .../admin/stack_upgrade/upgrade_options.hbs     | 58 ++++++++++----------
 .../admin/stack_upgrade/upgrade_wizard_view.js  |  6 +-
 .../stack_upgrade/upgrade_wizard_view_test.js   |  2 +-
 4 files changed, 48 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a2f7a030/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
index 35756cd..6d97205 100644
--- a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
+++ b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
@@ -52,6 +52,16 @@ App.MainAdminStackAndUpgradeController = Em.Controller.extend(App.LocalStorage,
   upgradeType: null,
 
   /**
+   * @type {Em.Object}
+   */
+  upgradeTypeConfig: Em.computed.findByKey('upgradeMethods', 'type', 'upgradeType'),
+
+  /**
+   * @type {boolean}
+   */
+  cantBeStarted: Em.computed.alias('upgradeTypeConfig.cantBeStarted'),
+
+  /**
    * @type {boolean}
    * @default true
    */
@@ -171,7 +181,8 @@ App.MainAdminStackAndUpgradeController = Em.Controller.extend(App.LocalStorage,
       precheckResultsMessage: '',
       precheckResultsTitle: '',
       action: '',
-      isWizardRestricted: !App.supports.enabledWizardForHostOrderedUpgrade
+      isWizardRestricted: !App.supports.enabledWizardForHostOrderedUpgrade,
+      cantBeStarted: true
     })
   ],
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a2f7a030/ambari-web/app/templates/main/admin/stack_upgrade/upgrade_options.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/stack_upgrade/upgrade_options.hbs b/ambari-web/app/templates/main/admin/stack_upgrade/upgrade_options.hbs
index 18fcfcf..fe3833c 100644
--- a/ambari-web/app/templates/main/admin/stack_upgrade/upgrade_options.hbs
+++ b/ambari-web/app/templates/main/admin/stack_upgrade/upgrade_options.hbs
@@ -26,35 +26,37 @@
         <div class="text method-text">{{t admin.stackVersions.version.upgrade.upgradeOptions.bodyMsg.method}}</div>
         <div {{bindAttr class=":row :method-options view.isInUpgradeWizard:disabled"}}>
             {{#each method in view.upgradeMethods}}
-              <div class="method-option col-md-6">
-                <div {{bindAttr class="method.allowed::not-allowed method.allowed::not-allowed-by-version
-          method.isPrecheckFailed:not-allowed method.isPrecheckFailed:check-failed
-          method.selected:selected method.type :img-thumbnail"}}
-                    {{action selectMethod method target="view"}}>
-                  <div {{bindAttr class="method.icon :method-icon"}}></div>
-                  <div class="method-name">{{method.displayName}}</div>
-                  <div class="method-description">{{{method.description}}}</div>
-                    {{#if view.showPreUpgradeChecks}}
-                        {{#if method.isCheckRequestInProgress}}
-                          <div class="method-precheck-message checking">
-                              {{view App.SpinnerView message="admin.stackVersions.version.upgrade.upgradeOptions.preCheck.msg.checking"}}
-                          </div>
-                        {{else}}
-                          <div {{bindAttr class=":method-precheck-message method.precheckResultsMessageClass"}}>
-                            <i {{bindAttr class="method.precheckResultsMessageIconClass"}}></i>
-                            <b>{{method.precheckResultsTitle}}</b>&nbsp;
-                            <a {{action runAction method target="view"}}>
-                                {{method.precheckResultsMessage}}
-                            </a>
-                              {{#if method.bypassedFailures}}
-                                <div
-                                  class="alert-danger">{{t admin.stackVersions.version.upgrade.upgradeOptions.errors_bypassed}}</div>
-                              {{/if}}
-                          </div>
+                {{#unless method.cantBeStarted}}
+                  <div class="method-option col-md-6">
+                    <div {{bindAttr class="method.allowed::not-allowed method.allowed::not-allowed-by-version
+                            method.isPrecheckFailed:not-allowed method.isPrecheckFailed:check-failed
+                            method.selected:selected method.type :img-thumbnail"}}
+                        {{action selectMethod method target="view"}}>
+                      <div {{bindAttr class="method.icon :method-icon"}}></div>
+                      <div class="method-name">{{method.displayName}}</div>
+                      <div class="method-description">{{{method.description}}}</div>
+                        {{#if view.showPreUpgradeChecks}}
+                            {{#if method.isCheckRequestInProgress}}
+                              <div class="method-precheck-message checking">
+                                  {{view App.SpinnerView message="admin.stackVersions.version.upgrade.upgradeOptions.preCheck.msg.checking"}}
+                              </div>
+                            {{else}}
+                              <div {{bindAttr class=":method-precheck-message method.precheckResultsMessageClass"}}>
+                                <i {{bindAttr class="method.precheckResultsMessageIconClass"}}></i>
+                                <b>{{method.precheckResultsTitle}}</b>&nbsp;
+                                <a {{action runAction method target="view"}}>
+                                    {{method.precheckResultsMessage}}
+                                </a>
+                                  {{#if method.bypassedFailures}}
+                                    <div
+                                      class="alert-danger">{{t admin.stackVersions.version.upgrade.upgradeOptions.errors_bypassed}}</div>
+                                  {{/if}}
+                              </div>
+                            {{/if}}
                         {{/if}}
-                    {{/if}}
-                </div>
-              </div>
+                    </div>
+                  </div>
+                {{/unless}}
             {{/each}}
         </div>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a2f7a030/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
index e54a0fa..bcb1ad7 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
@@ -68,7 +68,11 @@ App.upgradeWizardView = Em.View.extend({
    * when downgrade already started
    * @type {boolean}
    */
-  isDowngradeAvailable: Em.computed.and('!controller.isDowngrade', 'controller.downgradeAllowed'),
+  isDowngradeAvailable: Em.computed.and(
+    '!controller.isDowngrade',
+    'controller.downgradeAllowed',
+    '!controller.cantBeStarted'
+  ),
 
   /**
    * progress value is rounded to floor

http://git-wip-us.apache.org/repos/asf/ambari/blob/a2f7a030/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js b/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
index 5630a8c..075bcc8 100644
--- a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
+++ b/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
@@ -416,7 +416,7 @@ describe('App.upgradeWizardView', function () {
     });
   });
 
-  App.TestAliases.testAsComputedAnd(getView(), 'isDowngradeAvailable', ['!controller.isDowngrade', 'controller.downgradeAllowed']);
+  App.TestAliases.testAsComputedAnd(getView(), 'isDowngradeAvailable', ['!controller.isDowngrade', 'controller.downgradeAllowed', '!controller.cantBeStarted']);
 
   describe("#taskDetails", function () {
     it("runningItem present", function () {


[12/41] ambari git commit: Revert "AMBARI-18662. Check that Stack Version and repo URLs match up when registering repo (alexantonenko)"

Posted by jo...@apache.org.
Revert "AMBARI-18662. Check that Stack Version and repo URLs match up when registering repo (alexantonenko)"

This reverts commit 00f1e71b62f4133e70d7d8f9f2cd6fa0624ec40d.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2cd12ce4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2cd12ce4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2cd12ce4

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 2cd12ce4cebcf0a3a6fa58ff6f79a79b0a92f631
Parents: 149104c
Author: Alex Antonenko <hi...@gmail.com>
Authored: Fri Dec 2 20:22:01 2016 +0200
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Tue Dec 6 19:04:30 2016 +0200

----------------------------------------------------------------------
 ambari-web/app/controllers/installer.js         |  3 +--
 .../app/controllers/wizard/step1_controller.js  | 21 +-------------------
 ambari-web/app/messages.js                      |  2 --
 ambari-web/test/controllers/installer_test.js   | 13 +++++-------
 4 files changed, 7 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2cd12ce4/ambari-web/app/controllers/installer.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/installer.js b/ambari-web/app/controllers/installer.js
index f736ba3..e2c60c4 100644
--- a/ambari-web/app/controllers/installer.js
+++ b/ambari-web/app/controllers/installer.js
@@ -841,7 +841,6 @@ App.InstallerController = App.WizardController.extend({
       selectedStack.get('operatingSystems').forEach(function (os) {
         if (os.get('isSelected') && !os.get('isEmpty')) {
           os.get('repositories').forEach(function (repo) {
-            if (!repo.get('isUtils') && wizardStep1Controller.inappropriateUrlForStackVersion(repo, stackVersion)) return;
             repo.setProperties({
               errorTitle: '',
               errorContent: '',
@@ -889,8 +888,8 @@ App.InstallerController = App.WizardController.extend({
       }
     }
     this.set('validationCnt', this.get('validationCnt') - 1);
-    this.set('content.isCheckInProgress', false);
     if (!this.get('validationCnt')) {
+      this.set('content.isCheckInProgress', false);
       data.dfd.resolve();
     }
   },

http://git-wip-us.apache.org/repos/asf/ambari/blob/2cd12ce4/ambari-web/app/controllers/wizard/step1_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step1_controller.js b/ambari-web/app/controllers/wizard/step1_controller.js
index 72f3944..f397cd8 100644
--- a/ambari-web/app/controllers/wizard/step1_controller.js
+++ b/ambari-web/app/controllers/wizard/step1_controller.js
@@ -272,25 +272,6 @@ App.WizardStep1Controller = Em.Controller.extend({
     this.selectStackBy('id', event.context.get('id'));
   },
 
-  inappropriateUrlForStackVersion: function (repo, stackVersion) {
-    var baseUrl = repo.get('baseUrl'),
-      splittedBaseUrlArray = baseUrl.split('/'),
-      versionInUrl =  baseUrl[baseUrl.length - 1] === '/' ? splittedBaseUrlArray[splittedBaseUrlArray.length - 2] :
-                      splittedBaseUrlArray[splittedBaseUrlArray.length - 1],
-      versionInUrlString = String(parseFloat(versionInUrl)),
-      UrlIsInvalidForStackVersion = versionInUrlString !== stackVersion;
-
-    if (UrlIsInvalidForStackVersion) {
-      repo.setProperties({
-        validation: App.Repository.validation.INVALID,
-        errorTitle: Em.I18n.t("installer.step1.error.inappropriateUrlForStackVersion.title"),
-        errorContent: Em.I18n.t("installer.step1.error.inappropriateUrlForStackVersion.content").format(stackVersion, versionInUrlString)
-      });
-    }
-
-    return UrlIsInvalidForStackVersion;
-  },
-
   /**
    * Show popup with options to upload new version
    *
@@ -461,7 +442,7 @@ App.WizardStep1Controller = Em.Controller.extend({
            */
           enterUrlRadioButton: App.RadioButtonView.extend({
             labelTranslate: 'installer.step1.useLocalRepo.enterUrl',
-            checked: Em.computed.alias('controller.optionsToSelect.useLocalRepo.enterUrl.isSelected'),
+            checked: Em.computed.alias('controller.optionsToSelect.useLocalRepo.enterUrl.isSelected')
           }),
 
           click: function () {

http://git-wip-us.apache.org/repos/asf/ambari/blob/2cd12ce4/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 090244c..595cd5d 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -642,8 +642,6 @@ Em.I18n.translations = {
   'installer.step1.invalidURLAttention': '<b>Attention:</b> Please make sure all repository URLs are valid before proceeding.',
   'installer.step1.checkAtLeastOneAttention': '<b>Attention:</b> Please check at least one repository.',
   'installer.step1.retryRepoUrls': 'Click <b>here</b> to retry.',
-  'installer.step1.error.inappropriateUrlForStackVersion.title':'Inappropriate Url for selected Stack Version',
-  'installer.step1.error.inappropriateUrlForStackVersion.content': 'Your stack version is {0} when version in Url is {1}',
 
   'installer.step2.header':'Install Options',
   'installer.step2.body':'Enter the list of hosts to be included in the cluster and provide your SSH key.',

http://git-wip-us.apache.org/repos/asf/ambari/blob/2cd12ce4/ambari-web/test/controllers/installer_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/installer_test.js b/ambari-web/test/controllers/installer_test.js
index 63578bc..441af87 100644
--- a/ambari-web/test/controllers/installer_test.js
+++ b/ambari-web/test/controllers/installer_test.js
@@ -75,8 +75,8 @@ describe('App.InstallerController', function () {
       Em.Object.create({
         isSelected: true,
         reload: false,
-        id: 'nn-2.5',
-        stackNameVersion: 'nn-2.5',
+        id: 'nn-cc',
+        stackNameVersion: 'nn-cc',
         repositories: Em.A([
           Em.Object.create({
             isSelected: true,
@@ -89,7 +89,6 @@ describe('App.InstallerController', function () {
             isEmpty: false,
             repositories: Em.A([
               Em.Object.create({
-                baseUrl: 'url/2.5',
                 isEmpty: false,
                 errorTitle: '1',
                 errorContent: '1',
@@ -101,8 +100,7 @@ describe('App.InstallerController', function () {
       })
     ]);
     var wizard = Em.Object.create({
-      skipValidationChecked: true,
-      inappropriateUrlForStackVersion: App.WizardStep1Controller.create().inappropriateUrlForStackVersion
+      skipValidationChecked: true
     });
     it ('Should reload installed stacks', function() {
 
@@ -116,8 +114,8 @@ describe('App.InstallerController', function () {
         {
           "isSelected": true,
           "reload": true,
-          "id": "nn-2.5",
-          "stackNameVersion": 'nn-2.5',
+          "id": "nn-cc",
+          "stackNameVersion": 'nn-cc',
           "repositories": [
             {
               "isSelected": true,
@@ -130,7 +128,6 @@ describe('App.InstallerController', function () {
               "isEmpty": false,
               "repositories": [
                 {
-                  "baseUrl": "url/2.5",
                   "isEmpty": false,
                   "errorTitle": "",
                   "errorContent": "",


[34/41] ambari git commit: AMBARI-19114. Update hardcoded stack version warning to be more general than saying HDP (dili)

Posted by jo...@apache.org.
AMBARI-19114. Update hardcoded stack version warning to be more general than saying HDP (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/cebdb67c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/cebdb67c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/cebdb67c

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: cebdb67c5eabc25a619e15288792bd7534ca330d
Parents: 38827c0
Author: Di Li <di...@apache.org>
Authored: Wed Dec 7 11:32:25 2016 -0500
Committer: Di Li <di...@apache.org>
Committed: Wed Dec 7 11:32:25 2016 -0500

----------------------------------------------------------------------
 .../java/org/apache/ambari/server/checks/CheckDescription.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/cebdb67c/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java
index 7f24bf4..45497b5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java
@@ -236,10 +236,10 @@ public class CheckDescription {
 
   public static CheckDescription HARDCODED_STACK_VERSION_PROPERTIES_CHECK = new CheckDescription("HARDCODED_STACK_VERSION_PROPERTIES_CHECK",
     PrereqCheckType.CLUSTER,
-    "Found hardcoded hdp stack version in property value.",
+    "Found hardcoded stack version in property value.",
     new ImmutableMap.Builder<String, String>()
       .put(AbstractCheckDescriptor.DEFAULT,
-          "Some properties seem to contain hardcoded hdp version string \"%s\"." +
+          "Some properties seem to contain hardcoded stack version string \"%s\"." +
           " That is a potential problem when doing stack update.").build());
 
   public static CheckDescription VERSION_MISMATCH = new CheckDescription("VERSION_MISMATCH",


[25/41] ambari git commit: AMBARI-18966. Add check to ensure we do not have @Transactional annotations on private methods. (Attila Doroszlai via stoader)

Posted by jo...@apache.org.
AMBARI-18966. Add check to ensure we do not have @Transactional annotations on private methods. (Attila Doroszlai via stoader)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9170b499
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9170b499
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9170b499

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 9170b49933106a0af651530d827af4f4e73f1541
Parents: 1e261db
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Wed Dec 7 09:52:10 2016 +0100
Committer: Toader, Sebastian <st...@hortonworks.com>
Committed: Wed Dec 7 09:55:26 2016 +0100

----------------------------------------------------------------------
 ambari-server/checkstyle.xml                       | 17 +++++++++++++++++
 ambari-server/pom.xml                              |  4 ++++
 .../internal/AlertTargetResourceProvider.java      |  2 +-
 .../ClusterStackVersionResourceProvider.java       |  4 ++--
 .../internal/UpgradeResourceProvider.java          |  2 +-
 .../apache/ambari/server/orm/dao/AlertsDAO.java    |  6 +++---
 .../apache/ambari/server/state/ServiceImpl.java    |  2 +-
 .../ambari/server/state/cluster/ClustersImpl.java  |  2 +-
 .../apache/ambari/server/state/host/HostImpl.java  |  2 +-
 .../state/services/RetryUpgradeActionService.java  |  2 +-
 .../svccomphost/ServiceComponentHostImpl.java      |  2 +-
 .../ambari/server/upgrade/UpgradeCatalog240.java   |  4 ++--
 12 files changed, 35 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
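
Why this check matters: Guice implements @Transactional with AOP method
interception, and its interceptors can only wrap non-private methods invoked
through an injector-created proxy, so a private method annotated
@Transactional silently runs outside any transaction. A minimal sketch of the
failure mode and of the fix this patch applies throughout the codebase (class
and method names below are illustrative, not taken from the patch):

import com.google.inject.persist.Transactional;

public class ExampleDao {

  // BROKEN: Guice AOP cannot intercept private methods, so this body
  // executes with no transaction even though it is annotated.
  @Transactional
  private void persistBroken() {
    // ... JPA work that silently runs non-transactionally
  }

  // FIXED: package-private visibility lets the interceptor wrap the call,
  // which is why the patch widens visibility instead of dropping the
  // annotation.
  @Transactional
  void persistFixed() {
    // ... JPA work that now actually runs inside a transaction
  }
}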


http://git-wip-us.apache.org/repos/asf/ambari/blob/9170b499/ambari-server/checkstyle.xml
----------------------------------------------------------------------
diff --git a/ambari-server/checkstyle.xml b/ambari-server/checkstyle.xml
new file mode 100644
index 0000000..81f6380
--- /dev/null
+++ b/ambari-server/checkstyle.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0"?>
+<!-- Licensed under the Apache License, Version 2.0 (the "License"); you
+  may not use this file except in compliance with the License. You may obtain
+  a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless
+  required by applicable law or agreed to in writing, software distributed
+  under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
+  OR CONDITIONS OF ANY KIND, either express or implied. See the License for
+  the specific language governing permissions and limitations under the License.
+  See accompanying LICENSE file. -->
+<!DOCTYPE module PUBLIC
+  "-//Puppy Crawl//DTD Check Configuration 1.3//EN"
+  "http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
+<module name="Checker">
+  <module name="TreeWalker">
+    <module name="AvoidTransactionalOnPrivateMethodsCheck"/>
+  </module>
+</module>
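
The implementation of the AvoidTransactionalOnPrivateMethodsCheck module
referenced above is not part of this diff. For readers curious what such a
rule looks like, here is a minimal sketch assuming the Checkstyle
AbstractCheck API; the real Ambari check may be structured differently.

import com.puppycrawl.tools.checkstyle.api.AbstractCheck;
import com.puppycrawl.tools.checkstyle.api.DetailAST;
import com.puppycrawl.tools.checkstyle.api.TokenTypes;

public class AvoidTransactionalOnPrivateMethodsCheck extends AbstractCheck {

  @Override
  public int[] getDefaultTokens() {
    return new int[] { TokenTypes.METHOD_DEF };
  }

  @Override
  public int[] getAcceptableTokens() {
    return getDefaultTokens();
  }

  @Override
  public int[] getRequiredTokens() {
    return getDefaultTokens();
  }

  @Override
  public void visitToken(DetailAST method) {
    DetailAST modifiers = method.findFirstToken(TokenTypes.MODIFIERS);
    if (modifiers.findFirstToken(TokenTypes.LITERAL_PRIVATE) == null) {
      return; // only private methods are flagged
    }
    // Walk the method's modifier list looking for an @Transactional annotation.
    for (DetailAST node = modifiers.getFirstChild(); node != null;
        node = node.getNextSibling()) {
      if (node.getType() == TokenTypes.ANNOTATION) {
        DetailAST ident = node.findFirstToken(TokenTypes.IDENT);
        if (ident != null && "Transactional".equals(ident.getText())) {
          log(method.getLineNo(),
              "@Transactional should not be used on private methods");
        }
      }
    }
  }
}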

http://git-wip-us.apache.org/repos/asf/ambari/blob/9170b499/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index 69ab9d0..e8de84e 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -692,6 +692,10 @@
         <artifactId>jetty-maven-plugin</artifactId>
         <version>${jetty.version}</version>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-checkstyle-plugin</artifactId>
+      </plugin>
     </plugins>
     <resources>
       <resource>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9170b499/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AlertTargetResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AlertTargetResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AlertTargetResourceProvider.java
index 8e5674e..4010528 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AlertTargetResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AlertTargetResourceProvider.java
@@ -374,7 +374,7 @@ public class AlertTargetResourceProvider extends
    */
   @Transactional
   @SuppressWarnings("unchecked")
-  private void updateAlertTargets(long alertTargetId,
+  void updateAlertTargets(long alertTargetId,
       Map<String, Object> requestMap)
       throws AmbariException {
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/9170b499/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index 42aae84..d417ec2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -442,7 +442,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   }
 
   @Transactional
-  private void createHostVersions(Cluster cluster, List<Host> hosts, StackId stackId,
+  void createHostVersions(Cluster cluster, List<Host> hosts, StackId stackId,
       String desiredRepoVersion, RepositoryVersionState repoState)
       throws AmbariException, SystemException {
     final String clusterName = cluster.getClusterName();
@@ -482,7 +482,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   }
 
   @Transactional
-  private RequestStageContainer createOrchestration(Cluster cluster, StackId stackId,
+  RequestStageContainer createOrchestration(Cluster cluster, StackId stackId,
       List<Host> hosts, RepositoryVersionEntity repoVersionEnt, Map<String, Object> propertyMap)
       throws AmbariException, SystemException {
     final AmbariManagementController managementController = getManagementController();

http://git-wip-us.apache.org/repos/asf/ambari/blob/9170b499/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 5931743..67be152 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -972,7 +972,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
    * @throws AmbariException
    */
   @Transactional
-  private UpgradeEntity createUpgradeInsideTransaction(Cluster cluster,
+  UpgradeEntity createUpgradeInsideTransaction(Cluster cluster,
       RequestStageContainer request,
       UpgradeEntity upgradeEntity) throws AmbariException {
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/9170b499/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java
index d187947..e8c4b5f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java
@@ -1492,7 +1492,7 @@ public class AlertsDAO implements Cleanable {
    * @return a long representing the number of affected (deleted) records
    */
   @Transactional
-  private int cleanAlertNoticesForClusterBeforeDate(Long clusterId, long beforeDateMillis) {
+  int cleanAlertNoticesForClusterBeforeDate(Long clusterId, long beforeDateMillis) {
     LOG.info("Deleting AlertNotice entities before date " + new Date(beforeDateMillis));
     EntityManager entityManager = m_entityManagerProvider.get();
     List<Integer> ids = findAllAlertHistoryIdsBeforeDate(clusterId, beforeDateMillis);
@@ -1523,7 +1523,7 @@ public class AlertsDAO implements Cleanable {
    * @return a long representing the number of affected (deleted) records
    */
   @Transactional
-  private int cleanAlertCurrentsForClusterBeforeDate(long clusterId, long beforeDateMillis) {
+  int cleanAlertCurrentsForClusterBeforeDate(long clusterId, long beforeDateMillis) {
     LOG.info("Deleting AlertCurrent entities before date " + new Date(beforeDateMillis));
     EntityManager entityManager = m_entityManagerProvider.get();
     List<Integer> ids = findAllAlertHistoryIdsBeforeDate(clusterId, beforeDateMillis);
@@ -1553,7 +1553,7 @@ public class AlertsDAO implements Cleanable {
    */
 
   @Transactional
-  private int cleanAlertHistoriesForClusterBeforeDate(Long clusterId, long beforeDateMillis) {
+  int cleanAlertHistoriesForClusterBeforeDate(Long clusterId, long beforeDateMillis) {
     return executeQuery("AlertHistoryEntity.removeInClusterBeforeDate", AlertHistoryEntity.class, clusterId, beforeDateMillis);
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/9170b499/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
index f87b99c..e223eed 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
@@ -451,7 +451,7 @@ public class ServiceImpl implements Service {
   }
 
   @Transactional
-  private void persistEntities(ClusterServiceEntity serviceEntity) {
+  void persistEntities(ClusterServiceEntity serviceEntity) {
     long clusterId = cluster.getClusterId();
     ClusterEntity clusterEntity = clusterDAO.findById(clusterId);
     serviceEntity.setClusterEntity(clusterEntity);

http://git-wip-us.apache.org/repos/asf/ambari/blob/9170b499/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
index 8e5f37e..1de10f9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
@@ -183,7 +183,7 @@ public class ClustersImpl implements Clusters {
    */
   @Inject
   @Transactional
-  private void loadClustersAndHosts() {
+  void loadClustersAndHosts() {
     List<HostEntity> hostEntities = hostDAO.findAll();
     for (HostEntity hostEntity : hostEntities) {
       Host host = hostFactory.create(hostEntity);

http://git-wip-us.apache.org/repos/asf/ambari/blob/9170b499/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
index dd5e635..a444f5d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
@@ -954,7 +954,7 @@ public class HostImpl implements Host {
   }
 
   @Transactional
-  private void persistEntities(HostEntity hostEntity) {
+  void persistEntities(HostEntity hostEntity) {
     hostDAO.create(hostEntity);
     if (!hostEntity.getClusterEntities().isEmpty()) {
       for (ClusterEntity clusterEntity : hostEntity.getClusterEntities()) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/9170b499/ambari-server/src/main/java/org/apache/ambari/server/state/services/RetryUpgradeActionService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/services/RetryUpgradeActionService.java b/ambari-server/src/main/java/org/apache/ambari/server/state/services/RetryUpgradeActionService.java
index 1ea5558..a92aa04 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/services/RetryUpgradeActionService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/services/RetryUpgradeActionService.java
@@ -190,7 +190,7 @@ public class RetryUpgradeActionService extends AbstractScheduledService {
    * @param requestId Request Id to search tasks for.
    */
   @Transactional
-  private void retryHoldingCommandsInRequest(Long requestId) {
+  void retryHoldingCommandsInRequest(Long requestId) {
     if (requestId == null) {
       return;
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/9170b499/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
index c1655aa..5225598 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
@@ -1313,7 +1313,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
   }
 
   @Transactional
-  private void persistEntities(HostEntity hostEntity, HostComponentStateEntity stateEntity,
+  void persistEntities(HostEntity hostEntity, HostComponentStateEntity stateEntity,
       HostComponentDesiredStateEntity desiredStateEntity) {
     ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName(
         serviceComponent.getClusterId(), serviceComponent.getServiceName(),

http://git-wip-us.apache.org/repos/asf/ambari/blob/9170b499/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
index c113fff..b7e248a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
@@ -1563,7 +1563,7 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
    * @throws SQLException
    */
   @Transactional
-  private void updateServiceComponentDesiredStateTableDDL() throws SQLException {
+  void updateServiceComponentDesiredStateTableDDL() throws SQLException {
     if (dbAccessor.tableHasPrimaryKey(SERVICE_COMPONENT_DS_TABLE, ID)) {
       LOG.info("Skipping {} table Primary Key modifications since the new {} column already exists",
           SERVICE_COMPONENT_DS_TABLE, ID);
@@ -2785,7 +2785,7 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
    *  instead of cluster_name
    */
   @Transactional
-  private void updateViewInstanceTable() throws SQLException {
+  void updateViewInstanceTable() throws SQLException {
     try {
       if (Long.class.equals(dbAccessor.getColumnClass(VIEWINSTANCE_TABLE, CLUSTER_HANDLE_COLUMN))) {
         LOG.info(String.format("%s column is already numeric. Skipping an update of %s table.", CLUSTER_HANDLE_COLUMN, VIEWINSTANCE_TABLE));


[36/41] ambari git commit: AMBARI-19128. Tooltip is not hidden on host alerts page (onechiporenko)

Posted by jo...@apache.org.
AMBARI-19128. Tooltip is not hidden on host alerts page (onechiporenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9fc7f0ae
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9fc7f0ae
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9fc7f0ae

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 9fc7f0ae153ec90229fedc62d2fd35b3eb0c5843
Parents: 4bc1a73
Author: Oleg Nechiporenko <on...@apache.org>
Authored: Wed Dec 7 17:03:33 2016 +0200
Committer: Oleg Nechiporenko <on...@apache.org>
Committed: Wed Dec 7 19:07:32 2016 +0200

----------------------------------------------------------------------
 .../app/templates/main/host/host_alerts.hbs     |  8 ++--
 .../app/views/main/host/host_alerts_view.js     | 41 +++++++++++---------
 .../views/main/host/host_alerts_view_test.js    |  3 +-
 3 files changed, 28 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9fc7f0ae/ambari-web/app/templates/main/host/host_alerts.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/host/host_alerts.hbs b/ambari-web/app/templates/main/host/host_alerts.hbs
index 17c8bc3..2b02388 100644
--- a/ambari-web/app/templates/main/host/host_alerts.hbs
+++ b/ambari-web/app/templates/main/host/host_alerts.hbs
@@ -56,9 +56,11 @@
             <td>
               {{template "templates/main/alerts/alert_instance/status"}}
             </td>
-            <td><span
-                    class="alert-text" {{bindAttr data-original-title="instance.escapeSpecialCharactersFromTooltip"}}
-                    class="alert-text">{{instance.text}}</span></td>
+            <td>
+              <span class="alert-text" {{bindAttr data-original-title="instance.escapeSpecialCharactersFromTooltip"}}>
+                {{instance.text}}
+              </span>
+            </td>
           </tr>
         {{/each}}
       {{else}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/9fc7f0ae/ambari-web/app/views/main/host/host_alerts_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/host/host_alerts_view.js b/ambari-web/app/views/main/host/host_alerts_view.js
index 34a2a3e..fd0979f 100644
--- a/ambari-web/app/views/main/host/host_alerts_view.js
+++ b/ambari-web/app/views/main/host/host_alerts_view.js
@@ -46,10 +46,9 @@ App.MainHostAlertsView = App.TableView.extend({
         }
       });
       return [].concat(criticalAlerts, warningAlerts, okAlerts, otherAlerts);
-    } else {
-      return [];
     }
-  }.property('controller.content.@each'),
+    return [];
+  }.property('controller.content.[]'),
 
   willInsertElement: function () {
     var hostName = this.get('parentView.controller.content.hostName');
@@ -223,22 +222,16 @@ App.MainHostAlertsView = App.TableView.extend({
    * @type {string}
    */
   paginationLeftClass: function () {
-    if (this.get("startIndex") > 1) {
-      return "paginate_previous";
-    }
-    return "paginate_disabled_previous";
-  }.property("startIndex", 'filteredCount'),
+    return this.get('startIndex') > 1 ? 'paginate_previous' : 'paginate_disabled_previous';
+  }.property('startIndex', 'filteredCount'),
 
   /**
    * Determines how display "next"-link - as link or text
    * @type {string}
    */
   paginationRightClass: function () {
-    if ((this.get("endIndex")) < this.get("filteredCount")) {
-      return "paginate_next";
-    }
-    return "paginate_disabled_next";
-  }.property("endIndex", 'filteredCount'),
+    return this.get('endIndex') < this.get('filteredCount') ? 'paginate_next' : 'paginate_disabled_next';
+  }.property('endIndex', 'filteredCount'),
 
   /**
    * Show previous-page if user not in the first page
@@ -266,12 +259,12 @@ App.MainHostAlertsView = App.TableView.extend({
    */
   tooltipsUpdater: function () {
     Em.run.once(this,this.tooltipsUpdaterOnce);
-  }.observes('pageContent.@each'),
+  }.observes('pageContent.[]'),
 
   tooltipsUpdaterOnce: function() {
     var self = this;
     Em.run.next(this, function () {
-      App.tooltip(self.$(".enable-disable-button, .timeago, .alert-text"));
+      App.tooltip(self.$('.timeago, .alert-text'));
     });
   },
 
@@ -281,14 +274,24 @@ App.MainHostAlertsView = App.TableView.extend({
   clearFilters: function() {
     this.set('filterConditions', []);
     this.get('childViews').forEach(function(childView) {
-      if (childView['clearFilter']) {
-        childView.clearFilter();
-      }
+      Em.tryInvoke(childView, 'clearFilter');
     });
   },
 
+  /**
+   * Tooltips should be removed if some filter is applied or cleared
+   *
+   * @method clearTooltips
+   */
+  clearTooltips: function () {
+    var $elements = this.$('.timeago, .alert-text');
+    if ($elements) {
+      $elements.tooltip('destroy');
+    }
+  }.observes('filteredCount'),
+
   willDestroyElement: function() {
-    this.$(".enable-disable-button, .timeago, .alert-text").tooltip('destroy');
+    this.clearTooltips();
   }
 
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/9fc7f0ae/ambari-web/test/views/main/host/host_alerts_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/host/host_alerts_view_test.js b/ambari-web/test/views/main/host/host_alerts_view_test.js
index 2c0d59a..f8d2e95 100644
--- a/ambari-web/test/views/main/host/host_alerts_view_test.js
+++ b/ambari-web/test/views/main/host/host_alerts_view_test.js
@@ -186,7 +186,6 @@ describe('App.MainHostAlertsView', function () {
     });
   });
 
-
   describe("#paginationLeftClass", function() {
 
     it("startIndex is 2", function() {
@@ -271,7 +270,7 @@ describe('App.MainHostAlertsView', function () {
 
     it("tooltip should be called", function() {
       view.willDestroyElement();
-      expect(view.$.calledWith(".enable-disable-button, .timeago, .alert-text")).to.be.true;
+      expect(view.$.calledWith('.timeago, .alert-text')).to.be.true;
       expect(mock.tooltip.calledWith('destroy')).to.be.true;
     });
   });


[26/41] ambari git commit: Revert "AMBARI-18966. Add check to ensure we do not have @Transactional annotations on private methods. (Attila Doroszlai via stoader)"

Posted by jo...@apache.org.
Revert "AMBARI-18966. Add check to ensure we do not have @Transactional annotations on private methods. (Attila Doroszlai via stoader)"

This reverts commit 9170b49933106a0af651530d827af4f4e73f1541.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6806edcd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6806edcd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6806edcd

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 6806edcd94dd5568b8a21ec757fa7fa668eec71c
Parents: 9170b49
Author: Toader, Sebastian <st...@hortonworks.com>
Authored: Wed Dec 7 10:44:23 2016 +0100
Committer: Toader, Sebastian <st...@hortonworks.com>
Committed: Wed Dec 7 10:44:23 2016 +0100

----------------------------------------------------------------------
 ambari-server/checkstyle.xml                       | 17 -----------------
 ambari-server/pom.xml                              |  4 ----
 .../internal/AlertTargetResourceProvider.java      |  2 +-
 .../ClusterStackVersionResourceProvider.java       |  4 ++--
 .../internal/UpgradeResourceProvider.java          |  2 +-
 .../apache/ambari/server/orm/dao/AlertsDAO.java    |  6 +++---
 .../apache/ambari/server/state/ServiceImpl.java    |  2 +-
 .../ambari/server/state/cluster/ClustersImpl.java  |  2 +-
 .../apache/ambari/server/state/host/HostImpl.java  |  2 +-
 .../state/services/RetryUpgradeActionService.java  |  2 +-
 .../svccomphost/ServiceComponentHostImpl.java      |  2 +-
 .../ambari/server/upgrade/UpgradeCatalog240.java   |  4 ++--
 12 files changed, 14 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6806edcd/ambari-server/checkstyle.xml
----------------------------------------------------------------------
diff --git a/ambari-server/checkstyle.xml b/ambari-server/checkstyle.xml
deleted file mode 100644
index 81f6380..0000000
--- a/ambari-server/checkstyle.xml
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0"?>
-<!-- Licensed under the Apache License, Version 2.0 (the "License"); you
-  may not use this file except in compliance with the License. You may obtain
-  a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless
-  required by applicable law or agreed to in writing, software distributed
-  under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
-  OR CONDITIONS OF ANY KIND, either express or implied. See the License for
-  the specific language governing permissions and limitations under the License.
-  See accompanying LICENSE file. -->
-<!DOCTYPE module PUBLIC
-  "-//Puppy Crawl//DTD Check Configuration 1.3//EN"
-  "http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
-<module name="Checker">
-  <module name="TreeWalker">
-    <module name="AvoidTransactionalOnPrivateMethodsCheck"/>
-  </module>
-</module>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6806edcd/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index e8de84e..69ab9d0 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -692,10 +692,6 @@
         <artifactId>jetty-maven-plugin</artifactId>
         <version>${jetty.version}</version>
       </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-checkstyle-plugin</artifactId>
-      </plugin>
     </plugins>
     <resources>
       <resource>

http://git-wip-us.apache.org/repos/asf/ambari/blob/6806edcd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AlertTargetResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AlertTargetResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AlertTargetResourceProvider.java
index 4010528..8e5674e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AlertTargetResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AlertTargetResourceProvider.java
@@ -374,7 +374,7 @@ public class AlertTargetResourceProvider extends
    */
   @Transactional
   @SuppressWarnings("unchecked")
-  void updateAlertTargets(long alertTargetId,
+  private void updateAlertTargets(long alertTargetId,
       Map<String, Object> requestMap)
       throws AmbariException {
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6806edcd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index d417ec2..42aae84 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -442,7 +442,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   }
 
   @Transactional
-  void createHostVersions(Cluster cluster, List<Host> hosts, StackId stackId,
+  private void createHostVersions(Cluster cluster, List<Host> hosts, StackId stackId,
       String desiredRepoVersion, RepositoryVersionState repoState)
       throws AmbariException, SystemException {
     final String clusterName = cluster.getClusterName();
@@ -482,7 +482,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   }
 
   @Transactional
-  RequestStageContainer createOrchestration(Cluster cluster, StackId stackId,
+  private RequestStageContainer createOrchestration(Cluster cluster, StackId stackId,
       List<Host> hosts, RepositoryVersionEntity repoVersionEnt, Map<String, Object> propertyMap)
       throws AmbariException, SystemException {
     final AmbariManagementController managementController = getManagementController();

http://git-wip-us.apache.org/repos/asf/ambari/blob/6806edcd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 67be152..5931743 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -972,7 +972,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
    * @throws AmbariException
    */
   @Transactional
-  UpgradeEntity createUpgradeInsideTransaction(Cluster cluster,
+  private UpgradeEntity createUpgradeInsideTransaction(Cluster cluster,
       RequestStageContainer request,
       UpgradeEntity upgradeEntity) throws AmbariException {
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6806edcd/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java
index e8c4b5f..d187947 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/AlertsDAO.java
@@ -1492,7 +1492,7 @@ public class AlertsDAO implements Cleanable {
    * @return a long representing the number of affected (deleted) records
    */
   @Transactional
-  int cleanAlertNoticesForClusterBeforeDate(Long clusterId, long beforeDateMillis) {
+  private int cleanAlertNoticesForClusterBeforeDate(Long clusterId, long beforeDateMillis) {
     LOG.info("Deleting AlertNotice entities before date " + new Date(beforeDateMillis));
     EntityManager entityManager = m_entityManagerProvider.get();
     List<Integer> ids = findAllAlertHistoryIdsBeforeDate(clusterId, beforeDateMillis);
@@ -1523,7 +1523,7 @@ public class AlertsDAO implements Cleanable {
    * @return a long representing the number of affected (deleted) records
    */
   @Transactional
-  int cleanAlertCurrentsForClusterBeforeDate(long clusterId, long beforeDateMillis) {
+  private int cleanAlertCurrentsForClusterBeforeDate(long clusterId, long beforeDateMillis) {
     LOG.info("Deleting AlertCurrent entities before date " + new Date(beforeDateMillis));
     EntityManager entityManager = m_entityManagerProvider.get();
     List<Integer> ids = findAllAlertHistoryIdsBeforeDate(clusterId, beforeDateMillis);
@@ -1553,7 +1553,7 @@ public class AlertsDAO implements Cleanable {
    */
 
   @Transactional
-  int cleanAlertHistoriesForClusterBeforeDate(Long clusterId, long beforeDateMillis) {
+  private int cleanAlertHistoriesForClusterBeforeDate(Long clusterId, long beforeDateMillis) {
     return executeQuery("AlertHistoryEntity.removeInClusterBeforeDate", AlertHistoryEntity.class, clusterId, beforeDateMillis);
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6806edcd/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
index e223eed..f87b99c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
@@ -451,7 +451,7 @@ public class ServiceImpl implements Service {
   }
 
   @Transactional
-  void persistEntities(ClusterServiceEntity serviceEntity) {
+  private void persistEntities(ClusterServiceEntity serviceEntity) {
     long clusterId = cluster.getClusterId();
     ClusterEntity clusterEntity = clusterDAO.findById(clusterId);
     serviceEntity.setClusterEntity(clusterEntity);

http://git-wip-us.apache.org/repos/asf/ambari/blob/6806edcd/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
index 1de10f9..8e5f37e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
@@ -183,7 +183,7 @@ public class ClustersImpl implements Clusters {
    */
   @Inject
   @Transactional
-  void loadClustersAndHosts() {
+  private void loadClustersAndHosts() {
     List<HostEntity> hostEntities = hostDAO.findAll();
     for (HostEntity hostEntity : hostEntities) {
       Host host = hostFactory.create(hostEntity);

http://git-wip-us.apache.org/repos/asf/ambari/blob/6806edcd/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
index a444f5d..dd5e635 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
@@ -954,7 +954,7 @@ public class HostImpl implements Host {
   }
 
   @Transactional
-  void persistEntities(HostEntity hostEntity) {
+  private void persistEntities(HostEntity hostEntity) {
     hostDAO.create(hostEntity);
     if (!hostEntity.getClusterEntities().isEmpty()) {
       for (ClusterEntity clusterEntity : hostEntity.getClusterEntities()) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/6806edcd/ambari-server/src/main/java/org/apache/ambari/server/state/services/RetryUpgradeActionService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/services/RetryUpgradeActionService.java b/ambari-server/src/main/java/org/apache/ambari/server/state/services/RetryUpgradeActionService.java
index a92aa04..1ea5558 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/services/RetryUpgradeActionService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/services/RetryUpgradeActionService.java
@@ -190,7 +190,7 @@ public class RetryUpgradeActionService extends AbstractScheduledService {
    * @param requestId Request Id to search tasks for.
    */
   @Transactional
-  void retryHoldingCommandsInRequest(Long requestId) {
+  private void retryHoldingCommandsInRequest(Long requestId) {
     if (requestId == null) {
       return;
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/6806edcd/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
index 5225598..c1655aa 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
@@ -1313,7 +1313,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
   }
 
   @Transactional
-  void persistEntities(HostEntity hostEntity, HostComponentStateEntity stateEntity,
+  private void persistEntities(HostEntity hostEntity, HostComponentStateEntity stateEntity,
       HostComponentDesiredStateEntity desiredStateEntity) {
     ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName(
         serviceComponent.getClusterId(), serviceComponent.getServiceName(),

http://git-wip-us.apache.org/repos/asf/ambari/blob/6806edcd/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
index b7e248a..c113fff 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
@@ -1563,7 +1563,7 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
    * @throws SQLException
    */
   @Transactional
-  void updateServiceComponentDesiredStateTableDDL() throws SQLException {
+  private void updateServiceComponentDesiredStateTableDDL() throws SQLException {
     if (dbAccessor.tableHasPrimaryKey(SERVICE_COMPONENT_DS_TABLE, ID)) {
       LOG.info("Skipping {} table Primary Key modifications since the new {} column already exists",
           SERVICE_COMPONENT_DS_TABLE, ID);
@@ -2785,7 +2785,7 @@ public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
    *  instead of cluster_name
    */
   @Transactional
-  void updateViewInstanceTable() throws SQLException {
+  private void updateViewInstanceTable() throws SQLException {
     try {
       if (Long.class.equals(dbAccessor.getColumnClass(VIEWINSTANCE_TABLE, CLUSTER_HANDLE_COLUMN))) {
         LOG.info(String.format("%s column is already numeric. Skipping an update of %s table.", CLUSTER_HANDLE_COLUMN, VIEWINSTANCE_TABLE));


[22/41] ambari git commit: AMBARI-19094. HDP 3.0 support for YARN/MR with configs, kerberos, widgets, metrics, quicklinks, and themes (alejandro)

Posted by jo...@apache.org.
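
These stack files only declare defaults; at runtime YARN daemons and clients
read the values through Hadoop's Configuration API. A minimal sketch of how a
property defined in the diff below is consumed, assuming the standard
YarnConfiguration classpath loading (the fallback values here are
illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class TimelineAddressExample {
  public static void main(String[] args) {
    // YarnConfiguration loads yarn-default.xml and yarn-site.xml from the
    // classpath, so values declared in this stack file become visible here.
    Configuration conf = new YarnConfiguration();
    boolean timelineEnabled =
        conf.getBoolean("yarn.timeline-service.enabled", false);
    String webappAddress =
        conf.get("yarn.timeline-service.webapp.address", "localhost:8188");
    System.out.println("timeline enabled: " + timelineEnabled
        + ", webapp at " + webappAddress);
  }
}
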
http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
new file mode 100644
index 0000000..e33b91d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/configuration/yarn-site.xml
@@ -0,0 +1,814 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration supports_final="true">
+  <!-- These configs were inherited from HDP 2.1 -->
+  <property>
+    <name>yarn.timeline-service.enabled</name>
+    <value>true</value>
+    <description>Indicate to clients whether timeline service is enabled or not.
+      If enabled, clients will put entities and events to the timeline server.
+    </description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.generic-application-history.store-class</name>
+    <value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value>
+    <description>
+      Store class name for history store, defaulting to file system store
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.webapp.address</name>
+    <value>localhost:8188</value>
+    <description>
+      The http address of the timeline service web application.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.webapp.https.address</name>
+    <value>localhost:8190</value>
+    <description>
+      The https address of the timeline service web application.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.address</name>
+    <value>localhost:10200</value>
+    <description>
+      This is the default address for the timeline server to start
+      the RPC server.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <description>Time to live for timeline store data in milliseconds.</description>
+    <name>yarn.timeline-service.ttl-ms</name>
+    <value>2678400000</value>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <description>Length of time to wait between deletion cycles of leveldb timeline store in milliseconds.</description>
+    <name>yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms</name>
+    <value>300000</value>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>yarn.application.classpath</name>
+    <value>$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*</value>
+    <description>Classpath for typical applications.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop.registry.rm.enabled</name>
+    <value>false</value>
+    <description>
+      Is the registry enabled: does the RM start it up, create the user and system paths, and purge service records when containers, application attempts and applications complete
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop.registry.zk.quorum</name>
+    <value>localhost:2181</value>
+    <description>
+      List of hostname:port pairs defining the zookeeper quorum binding for the registry
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.recovery.enabled</name>
+    <value>true</value>
+    <description>Enable the node manager to recover after starting</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.recovery.dir</name>
+    <value>{{yarn_log_dir_prefix}}/nodemanager/recovery-state</value>
+    <description>
+      The local filesystem directory in which the node manager will store
+      state when recovery is enabled.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.client.nodemanager-connect.retry-interval-ms</name>
+    <value>10000</value>
+    <description>Time interval between each attempt to connect to NM</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.client.nodemanager-connect.max-wait-ms</name>
+    <value>60000</value>
+    <description>Max time to wait to establish a connection to NM</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.recovery.enabled</name>
+    <value>true</value>
+    <description>
+      Enable RM to recover state after starting.
+      If true, then yarn.resourcemanager.store.class must be specified.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
+    <value>true</value>
+    <description>
+      Enable RM work preserving recovery. This configuration is private to YARN for experimenting with the feature.
+    </description>
+    <display-name>Enable Work Preserving Restart</display-name>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.store.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
+    <description>
+      The class to use as the persistent store.
+      If org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore is used,
+      the store is implicitly fenced; meaning a single ResourceManager
+      is able to use the store at any point in time.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-address</name>
+    <value>localhost:2181</value>
+    <description>
+      List Host:Port of the ZooKeeper servers to be used by the RM. comma separated host:port pairs, each corresponding to a zk server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" If the optional chroot suffix is used the example would look like: "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be rooted at "/app/a" and all paths would be relative to this root - ie getting/setting/etc...  "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-state-store.parent-path</name>
+    <value>/rmstore</value>
+    <description>Full path of the ZooKeeper znode where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the value for yarn.resourcemanager.store.class</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-acl</name>
+    <value>world:anyone:rwcda</value>
+    <description>ACLs to be used for ZooKeeper znodes.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms</name>
+    <value>10000</value>
+    <description>Set the amount of time the RM waits before allocating new containers on work-preserving recovery. Such a wait period gives the RM a chance to settle down, resyncing with NMs in the cluster on recovery, before assigning new containers to applications.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.connect.retry-interval.ms</name>
+    <value>30000</value>
+    <description>How often to try connecting to the ResourceManager.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.connect.max-wait.ms</name>
+    <value>900000</value>
+    <description>Maximum time to wait to establish connection to ResourceManager</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-retry-interval-ms</name>
+    <value>1000</value>
+    <description>"Retry interval in milliseconds when connecting to ZooKeeper.
+      When HA is enabled, the value here is NOT used. It is generated
+      automatically from yarn.resourcemanager.zk-timeout-ms and
+      yarn.resourcemanager.zk-num-retries."
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-num-retries</name>
+    <value>1000</value>
+    <description>Number of times RM tries to connect to ZooKeeper.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-timeout-ms</name>
+    <value>10000</value>
+    <description>ZooKeeper session timeout in milliseconds. Session expiration is managed by the ZooKeeper cluster itself, not by the client. This value is used by the cluster to determine when the client's session expires. Expirations happens when the cluster does not hear from the client within the specified session timeout period (i.e. no heartbeat).</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.state-store.max-completed-applications</name>
+    <value>${yarn.resourcemanager.max-completed-applications}</value>
+    <description>The maximum number of completed applications RM state store keeps, less than or equal to ${yarn.resourcemanager.max-completed-applications}. By default, it equals ${yarn.resourcemanager.max-completed-applications}. This ensures that the applications kept in the state store are consistent with the applications remembered in RM memory. Any values larger than ${yarn.resourcemanager.max-completed-applications} will be reset to ${yarn.resourcemanager.max-completed-applications}. Note that this value impacts the RM recovery performance. Typically, a smaller value indicates better performance on RM recovery.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.fs.state-store.retry-policy-spec</name>
+    <value>2000, 500</value>
+    <description>HDFS client retry policy specification. HDFS client retry is always enabled. Specified in pairs of sleep-time and number-of-retries (t0, n0), (t1, n1), ...: the first n0 retries sleep t0 milliseconds on average, the following n1 retries sleep t1 milliseconds on average, and so on.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.fs.state-store.uri</name>
+    <value> </value>
+    <description>URI pointing to the location of the FileSystem path where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore as the value for yarn.resourcemanager.store.class.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.ha.enabled</name>
+    <value>false</value>
+    <description>enable RM HA or not</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value>
+    <description>Pre-requisite to use CGroups</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
+    <value>hadoop-yarn</value>
+    <description>Name of the Cgroups hierarchy under which all YARN jobs will be launched</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
+    <value>false</value>
+    <description>If true, YARN will automount the CGroup; however, the directory needs to already exist. Otherwise, the cgroup should be mounted by the admin.</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
+    <value>/cgroup</value>
+    <description>Path used by the LCE to mount cgroups if not found. This path must exist before the NodeManager is launched.</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
+    <value>false</value>
+    <description>Strictly limit CPU resource usage to allocated usage even if spare CPU is available</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.resource.cpu-vcores</name>
+    <value>8</value>
+    <description>Number of vcores that can be allocated
+      for containers. This is used by the RM scheduler when allocating
+      resources for containers. This is not used to limit the number of
+      CPUs used by YARN containers. If it is set to -1 and
+      yarn.nodemanager.resource.detect-hardware-capabilities is true, it is
+      automatically determined from the hardware in case of Windows and Linux.
+      In other cases, the number of vcores is 8 by default.
+    </description>
+    <display-name>Number of virtual cores</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>32</maximum>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
+    <value>80</value>
+    <description>The amount of CPU allocated for YARN containers - only effective when used with CGroups</description>
+    <display-name>Percentage of physical CPU allocated for all containers on a node</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>100</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.node-labels.fs-store.retry-policy-spec</name>
+    <value>2000, 500</value>
+    <description>
+      Retry policy used for FileSystem node label store. The policy is
+      specified by N pairs of sleep-time in milliseconds and number-of-retries
+      &quot;s1,n1,s2,n2,...&quot;.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb</name>
+    <value>1000</value>
+    <description>This is related to disk size on the machines, admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
+    <value>90</value>
+    <description>This is related to disk size on the machines, admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
+    <value>-1</value>
+    <description>Defines how often NMs wake up to upload log files. The default value is -1. By default, the logs will be uploaded when the application is finished. By setting this configuration, logs can be uploaded periodically while the application is running. The minimum rolling-interval-seconds that can be set is 3600.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-aggregation.debug-enabled</name>
+    <value>false</value>
+    <description>
+      This configuration is for debug and test purposes.
+      By setting this configuration to true,
+      we can break the lower bound of yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-aggregation.num-log-files-per-app</name>
+    <value>30</value>
+    <description>This is a temporary solution. The configuration will be deleted once we find a more scalable method to write only a single log file per LRS.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
+    <value>true</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size</name>
+    <value>10</value>
+    <description>Number of worker threads that send the yarn system metrics data.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.client.max-retries</name>
+    <value>30</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.client.retry-interval-ms</name>
+    <value>1000</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.ttl-enable</name>
+    <value>true</value>
+    <description>
+      Enable age off of timeline store data.
+    </description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.state-store-class</name>
+    <value>org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore</value>
+    <description>Store class name for timeline state store.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-state-store.path</name>
+    <value>/hadoop/yarn/timeline</value>
+    <description>Store file name for leveldb state store.</description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.path</name>
+    <value>/hadoop/yarn/timeline</value>
+    <description>Store file name for leveldb timeline store.</description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.read-cache-size</name>
+    <value>104857600</value>
+    <description>
+      Size of read cache for uncompressed blocks for leveldb timeline store in bytes.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size</name>
+    <value>10000</value>
+    <description>
+      Size of cache for recently read entity start times for leveldb timeline store in number of entities.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size</name>
+    <value>10000</value>
+    <description>
+      Size of cache for recently written entity start times for leveldb timeline store in number of entities.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.http-authentication.type</name>
+    <value>simple</value>
+    <description>
+      Defines authentication used for the Timeline Server HTTP endpoint.
+      Supported values are: simple | kerberos | $AUTHENTICATION_HANDLER_CLASSNAME
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.http-authentication.simple.anonymous.allowed</name>
+    <value>true</value>
+    <description/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled</name>
+    <value>false</value>
+    <description>
+      Flag to enable override of the default kerberos authentication filter with
+      the RM authentication filter to allow authentication using delegation
+      tokens (fallback to kerberos if the tokens are missing).
+      Only applicable when the http authentication type is kerberos.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.bind-host</name>
+    <value>0.0.0.0</value>
+    <description>The default value is 0.0.0.0; when this is set, the service binds on all interfaces. The two intended values are blank and 0.0.0.0 (without quotes), with blank as the default.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.bind-host</name>
+    <value>0.0.0.0</value>
+    <description>The default value is 0.0.0.0; when this is set, the service binds on all interfaces. The two intended values are blank and 0.0.0.0 (without quotes), with blank as the default.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.bind-host</name>
+    <value>0.0.0.0</value>
+    <description>The default value is 0.0.0.0; when this is set, the service binds on all interfaces. The two intended values are blank and 0.0.0.0 (without quotes), with blank as the default.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.node-labels.fs-store.root-dir</name>
+    <value>/system/yarn/node-labels</value>
+    <description>
+      URI for NodeLabelManager.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.minimum-allocation-vcores</name>
+    <value>1</value>
+    <description/>
+    <display-name>Minimum Container Size (VCores)</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>8</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.cpu-vcores</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.scheduler.maximum-allocation-vcores</name>
+    <value>8</value>
+    <description/>
+    <display-name>Maximum Container Size (VCores)</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>8</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.cpu-vcores</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.node-labels.enabled</name>
+    <value>false</value>
+    <description>
+      Enable node labels to restrict YARN applications so that they run only on cluster nodes that have a specified node label.
+    </description>
+    <display-name>Node Labels</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.container-executor.class</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
+    <description>ContainerExecutor for launching containers</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+      <property>
+        <type>core-site</type>
+        <name>hadoop.security.authentication</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.group</name>
+    <value>hadoop</value>
+    <description>Unix group of the NodeManager</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+      <property>
+        <type>cluster-env</type>
+        <name>user_group</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.scheduler.monitor.enable</name>
+    <description>
+      Enable a set of periodic monitors (specified in
+      yarn.resourcemanager.scheduler.monitor.policies) that affect the
+      scheduler.
+    </description>
+    <value>false</value>
+    <display-name>Pre-emption</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- In HDP 2.3, these properties were deleted:
+  yarn.node-labels.manager-class
+  -->
+
+  <!-- These configs were inherited from HDP 2.3 -->
+  <property>
+    <name>yarn.timeline-service.recovery.enabled</name>
+    <description>
+      Enable timeline server to recover state after starting. If
+      true, then yarn.timeline-service.state-store-class must be specified.
+    </description>
+    <value>true</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.acl.enable</name>
+    <value>false</value>
+    <description>Whether ACLs are enabled.</description>
+    <depends-on>
+      <property>
+        <type>ranger-yarn-plugin-properties</type>
+        <name>ranger-yarn-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.authorization-provider</name>
+    <description>YARN authorization provider class.</description>
+    <depends-on>
+      <property>
+        <type>ranger-yarn-plugin-properties</type>
+        <name>ranger-yarn-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.admin.acl</name>
+    <value>yarn</value>
+    <description>ACL defining who can be an admin of the YARN cluster.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!--ats v1.5 properties-->
+  <property>
+    <name>yarn.timeline-service.version</name>
+    <value>1.5</value>
+    <description>Timeline service version we&#x2019;re currently using.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.store-class</name>
+    <value>org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore</value>
+    <description>Main storage class for YARN timeline server.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.active-dir</name>
+    <value>/ats/active/</value>
+    <description>DFS path to store active application&#x2019;s timeline data</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.done-dir</name>
+    <value>/ats/done/</value>
+    <description>DFS path to store done application&#x2019;s timeline data</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes</name>
+    <value/>
+    <description>Plugins that can translate a timeline entity read request into a list of timeline cache ids, separated by commas. </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- advanced ats v1.5 properties-->
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.summary-store</name>
+    <description>Summary storage for ATS v1.5</description>
+    <!-- Use rolling leveldb, advanced -->
+    <value>org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.scan-interval-seconds</name>
+    <description>
+      Scan interval for the ATS v1.5 entity group file system storage reader. This
+      value controls how frequently the reader scans the HDFS active directory
+      for application status.
+    </description>
+    <!-- Default is 60 seconds, advanced -->
+    <value>60</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds</name>
+    <description>
+      Scan interval for the ATS v1.5 entity group file system storage cleaner. This
+      value controls how frequently the cleaner scans the HDFS done directory
+      for stale application data.
+    </description>
+    <!-- 3600 is default, advanced -->
+    <value>3600</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.retain-seconds</name>
+    <description>
+      How long the ATS v1.5 entity group file system storage will keep an
+      application's data in the done directory.
+    </description>
+    <!-- 7 days is default, advanced -->
+    <value>604800</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.4 -->
+  <property>
+    <name>yarn.nodemanager.aux-services.spark_shuffle.class</name>
+    <value>org.apache.spark.network.yarn.YarnShuffleService</value>
+    <description>The auxiliary service class to use for Spark</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.5 -->
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle,spark_shuffle,spark2_shuffle</value>
+    <description>Auxiliary services of the NodeManager. A valid service name should only contain a-zA-Z0-9_ and cannot start with a number.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services.spark2_shuffle.class</name>
+    <value>org.apache.spark.network.yarn.YarnShuffleService</value>
+    <description>The auxiliary service class to use for Spark 2</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services.spark_shuffle.classpath</name>
+    <value>{{stack_root}}/${hdp.version}/spark/aux/*</value>
+    <description>The auxiliary service classpath to use for Spark</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.aux-services.spark2_shuffle.classpath</name>
+    <value>{{stack_root}}/${hdp.version}/spark2/aux/*</value>
+    <description>The auxiliary service classpath to use for Spark 2</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
+    <description>Defines how often NMs wake up to upload log files. The default value is -1; by default, logs are uploaded when the application finishes. By setting this configuration, logs can be uploaded periodically while the application is running. The minimum rolling interval that can be set is 3600 seconds.</description>
+    <value>3600</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.nodemanager.container-metrics.unregister-delay-ms</name>
+    <value>60000</value>
+    <description>The delay time ms to unregister container metrics after completion.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath</name>
+    <value/>
+    <description>Classpath for all plugins defined in yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
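
As an aside on the retry-policy-spec format documented above ("s1,n1,s2,n2,..."), here is a minimal Java sketch of parsing such a value. It is illustrative only, not Hadoop's actual parser, and the class and method names are made up for the example.

import java.util.ArrayList;
import java.util.List;

public class RetryPolicySpec {
    // One (sleep-time-ms, number-of-retries) pair from the spec string.
    public record Pair(long sleepMillis, int retries) {}

    public static List<Pair> parse(String spec) {
        String[] parts = spec.split("\\s*,\\s*");
        if (parts.length % 2 != 0) {
            throw new IllegalArgumentException("Expected s1,n1,s2,n2,... but got: " + spec);
        }
        List<Pair> pairs = new ArrayList<>();
        for (int i = 0; i < parts.length; i += 2) {
            pairs.add(new Pair(Long.parseLong(parts[i]), Integer.parseInt(parts[i + 1])));
        }
        return pairs;
    }

    public static void main(String[] args) {
        // The default above, "2000, 500": sleep 2000 ms, retry 500 times.
        System.out.println(parse("2000, 500"));
    }
}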

http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/kerberos.json
new file mode 100644
index 0000000..e690204
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/kerberos.json
@@ -0,0 +1,278 @@
+{
+  "services": [
+    {
+      "name": "YARN",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "yarn-site": {
+            "yarn.timeline-service.enabled": "true",
+            "yarn.timeline-service.http-authentication.type": "kerberos",
+            "yarn.acl.enable": "true",
+            "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
+            "yarn.timeline-service.http-authentication.signature.secret": "",
+            "yarn.timeline-service.http-authentication.signature.secret.file": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
+            "yarn.timeline-service.http-authentication.token.validity": "",
+            "yarn.timeline-service.http-authentication.cookie.domain": "",
+            "yarn.timeline-service.http-authentication.cookie.path": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
+            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": ""
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
+            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
+          }
+        },
+        {
+          "capacity-scheduler": {
+            "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
+          }
+        },
+        {
+          "ranger-yarn-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "NODEMANAGER",
+          "identities": [
+            {
+              "name": "nodemanager_nm",
+              "principal": {
+                "value": "nm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.nodemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.nodemanager.keytab"
+              }
+            },
+            {
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.service.principal"
+              },
+              "keytab": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.keytab.file"
+              },
+              "when" : {
+                "contains" : ["services", "HIVE"]
+              }
+            },
+            {
+              "name": "llap_zk_hive",
+              "principal": {
+                "value": "hive/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.principal"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hive.llap.zk.sm.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": "r"
+                },
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
+              },
+              "when" : {
+                "contains" : ["services", "HIVE"]
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "yarn-site": {
+                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
+              }
+            }
+          ]
+        },
+        {
+          "name": "RESOURCEMANAGER",
+          "identities": [
+            {
+              "name": "resource_manager_rm",
+              "principal": {
+                "value": "rm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.resourcemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/rm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.resourcemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
+              }
+            },
+            {
+              "name": "/YARN/RESOURCEMANAGER/resource_manager_rm",
+              "principal": {
+                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "APP_TIMELINE_SERVER",
+          "identities": [
+            {
+              "name": "app_timeline_server_yarn",
+              "principal": {
+                "value": "yarn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.timeline-service.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/yarn.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.timeline-service.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
+              }
+            },
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "name": "MAPREDUCE2",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "HISTORYSERVER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "history_server_jhs",
+              "principal": {
+                "value": "jhs/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "mapred-site/mapreduce.jobhistory.principal",
+                "local_username": "${mapred-env/mapred_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jhs.service.keytab",
+                "owner": {
+                  "name": "${mapred-env/mapred_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file
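
The kerberos.json above leans heavily on ${config-type/property} placeholders such as ${yarn-env/yarn_user} and ${realm}. As a rough illustration of how such placeholders resolve against a flat configuration map, here is a minimal sketch; it is not Ambari's actual replacement code, and the class name is hypothetical.

import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class PlaceholderResolver {
    private static final Pattern VAR = Pattern.compile("\\$\\{([^}]+)}");

    public static String resolve(String template, Map<String, String> values) {
        Matcher m = VAR.matcher(template);
        StringBuilder out = new StringBuilder();
        while (m.find()) {
            // Keys look like "yarn-env/yarn_user"; unknown keys are kept verbatim.
            String replacement = values.getOrDefault(m.group(1), m.group(0));
            m.appendReplacement(out, Matcher.quoteReplacement(replacement));
        }
        m.appendTail(out);
        return out.toString();
    }

    public static void main(String[] args) {
        Map<String, String> values = Map.of("yarn-env/yarn_user", "yarn", "realm", "EXAMPLE.COM");
        // _HOST is deliberately left alone; Hadoop expands it at runtime.
        System.out.println(resolve("nm/_HOST@${realm}", values));            // nm/_HOST@EXAMPLE.COM
        System.out.println(resolve("${yarn-env/yarn_user},dr.who", values)); // yarn,dr.who
    }
}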

http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
new file mode 100644
index 0000000..7e1fd78
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/metainfo.xml
@@ -0,0 +1,173 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>YARN</name>
+      <displayName>YARN</displayName>
+      <version>2.7.1.3.0</version>
+      <extends>common-services/YARN/2.1.0.2.0</extends>
+
+      <components>
+        <component>
+          <name>APP_TIMELINE_SERVER</name>
+          <displayName>App Timeline Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <reassignAllowed>true</reassignAllowed>
+
+          <commandScript>
+            <script>scripts/application_timeline_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+
+          <dependencies>
+            <dependency>
+              <name>TEZ/TEZ_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>SPARK/SPARK_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+
+        <component>
+          <name>RESOURCEMANAGER</name>
+          <category>MASTER</category>
+          <cardinality>1-2</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+
+          <dependencies>
+            <dependency>
+              <name>TEZ/TEZ_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <configuration-dependencies>
+            <config-type>capacity-scheduler</config-type>
+          </configuration-dependencies>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>amazon2015,redhat6,redhat7,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>hadoop_${stack_version}-yarn</name>
+            </package>
+            <package>
+              <name>hadoop_${stack_version}-mapreduce</name>
+            </package>
+            <package>
+              <name>hadoop_${stack_version}-hdfs</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-${stack_version}-yarn</name>
+            </package>
+            <package>
+              <name>hadoop-${stack_version}-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
+      <configuration-dependencies>
+        <config-type>yarn-site</config-type>
+        <config-type>yarn-env</config-type>
+        <config-type>core-site</config-type>
+        <config-type>yarn-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+
+    <service>
+      <name>MAPREDUCE2</name>
+      <displayName>MapReduce2</displayName>
+      <version>2.7.1.3.0</version>
+      <configuration-dir>configuration-mapred</configuration-dir>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>amazon2015,redhat6,redhat7,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>hadoop_${stack_version}-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-${stack_version}-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <themes-dir>themes-mapred</themes-dir>
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+      <quickLinksConfigurations-dir>quicklinks-mapred</quickLinksConfigurations-dir>
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+    </service>
+  </services>
+</metainfo>
\ No newline at end of file
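
To make the metainfo.xml structure concrete, the sketch below pulls each service's name and version out of a file like the one above using plain DOM. It is illustrative only; it naively takes the first <name>/<version> under each <service>, which works here because the service's own elements precede its components.

import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;

public class MetainfoReader {
    public static void main(String[] args) throws Exception {
        Document doc = DocumentBuilderFactory.newInstance()
                .newDocumentBuilder()
                .parse("metainfo.xml");
        NodeList services = doc.getElementsByTagName("service");
        for (int i = 0; i < services.getLength(); i++) {
            Element service = (Element) services.item(i);
            String name = service.getElementsByTagName("name").item(0).getTextContent();
            String version = service.getElementsByTagName("version").item(0).getTextContent();
            System.out.println(name + " " + version); // e.g. "YARN 2.7.1.3.0", "MAPREDUCE2 2.7.1.3.0"
        }
    }
}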

http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks-mapred/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks-mapred/quicklinks.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks-mapred/quicklinks.json
new file mode 100644
index 0000000..5ffbc07
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks-mapred/quicklinks.json
@@ -0,0 +1,80 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"mapreduce.jobhistory.http.policy",
+          "desired":"HTTPS_ONLY",
+          "site":"mapred-site"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "jobhistory_ui",
+        "label": "JobHistory UI",
+        "requires_user_name": "false",
+        "component_name": "HISTORYSERVER",
+        "url": "%@://%@:%@",
+        "port":{
+          "http_property": "mapreduce.jobhistory.webapp.address",
+          "http_default_port": "19888",
+          "https_property": "mapreduce.jobhistory.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "mapred-site"
+        }
+      },
+      {
+        "name": "jobhistory_logs",
+        "label": "JobHistory logs",
+        "requires_user_name": "false",
+        "component_name": "HISTORYSERVER",
+        "url": "%@://%@:%@/logs",
+        "port":{
+          "http_property": "mapreduce.jobhistory.webapp.address",
+          "http_default_port": "19888",
+          "https_property": "mapreduce.jobhistory.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "mapred-site"
+        }
+      },
+      {
+        "name":"jobhistory_jmx",
+        "label":"JobHistory JMX",
+        "requires_user_name":"false",
+        "component_name": "HISTORYSERVER",
+        "url":"%@://%@:%@/jmx",
+        "port":{
+          "http_property": "mapreduce.jobhistory.webapp.address",
+          "http_default_port": "19888",
+          "https_property": "mapreduce.jobhistory.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "mapred-site"
+        }
+      },
+      {
+        "name":"thread_stacks",
+        "label":"Thread Stacks",
+        "requires_user_name": "false",
+        "component_name": "HISTORYSERVER",
+        "url":"%@://%@:%@/stacks",
+        "port":{
+          "http_property": "mapreduce.jobhistory.webapp.address",
+          "http_default_port": "19888",
+          "https_property": "mapreduce.jobhistory.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "mapred-site"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
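
Each link above carries a "regex" ("\\w*:(\\d+)") used to pull the port out of the webapp address property. A minimal sketch of that extraction (illustrative only, not the Ambari quick-links code):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class QuickLinkPort {
    static String port(String address, String regex, String defaultPort) {
        Matcher m = Pattern.compile(regex).matcher(address);
        return m.find() ? m.group(1) : defaultPort; // group 1 captures the digits after ':'
    }

    public static void main(String[] args) {
        // e.g. mapred-site's mapreduce.jobhistory.webapp.address = "jhs.example.com:19888"
        System.out.println(port("jhs.example.com:19888", "\\w*:(\\d+)", "19888")); // 19888
    }
}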

http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks/quicklinks.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks/quicklinks.json
new file mode 100644
index 0000000..37248d0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/quicklinks/quicklinks.json
@@ -0,0 +1,80 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"yarn.http.policy",
+          "desired":"HTTPS_ONLY",
+          "site":"yarn-site"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "resourcemanager_ui",
+        "label": "ResourceManager UI",
+        "requires_user_name": "false",
+        "component_name": "RESOURCEMANAGER",
+        "url": "%@://%@:%@",
+        "port":{
+          "http_property": "yarn.resourcemanager.webapp.address",
+          "http_default_port": "8088",
+          "https_property": "yarn.resourcemanager.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "yarn-site"
+        }
+      },
+      {
+        "name": "resourcemanager_logs",
+        "label": "ResourceManager logs",
+        "requires_user_name": "false",
+        "component_name": "RESOURCEMANAGER",
+        "url": "%@://%@:%@/logs",
+        "port":{
+          "http_property": "yarn.resourcemanager.webapp.address",
+          "http_default_port": "8088",
+          "https_property": "yarn.resourcemanager.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "yarn-site"
+        }
+      },
+      {
+        "name": "resourcemanager_jmx",
+        "label":"ResourceManager JMX",
+        "requires_user_name": "false",
+        "component_name": "RESOURCEMANAGER",
+        "url":"%@://%@:%@/jmx",
+        "port":{
+          "http_property": "yarn.resourcemanager.webapp.address",
+          "http_default_port": "8088",
+          "https_property": "yarn.resourcemanager.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "yarn-site"
+        }
+      },
+      {
+        "name": "thread_stacks",
+        "label":"Thread Stacks",
+        "requires_user_name": "false",
+        "component_name": "RESOURCEMANAGER",
+        "url":"%@://%@:%@/stacks",
+        "port":{
+          "http_property": "yarn.resourcemanager.webapp.address",
+          "http_default_port": "8088",
+          "https_property": "yarn.resourcemanager.webapp.https.address",
+          "https_default_port": "8090",
+          "regex": "\\w*:(\\d+)",
+          "site": "yarn-site"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
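
The "protocol" block above means the links switch to HTTPS only when the check passes (yarn.http.policy equals HTTPS_ONLY in yarn-site); otherwise they fall back to HTTP. A minimal sketch of that decision, with hypothetical names:

import java.util.Map;

public class QuickLinkProtocol {
    static String protocol(Map<String, String> yarnSite) {
        // Mirrors the "checks" entry: property yarn.http.policy, desired HTTPS_ONLY.
        return "HTTPS_ONLY".equals(yarnSite.get("yarn.http.policy")) ? "https" : "http";
    }

    public static void main(String[] args) {
        System.out.println(protocol(Map.of("yarn.http.policy", "HTTPS_ONLY"))); // https
        System.out.println(protocol(Map.of()));                                 // http
    }
}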

http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes-mapred/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes-mapred/theme.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes-mapred/theme.json
new file mode 100644
index 0000000..5019447
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes-mapred/theme.json
@@ -0,0 +1,132 @@
+{
+  "name": "default",
+  "description": "Default theme for MAPREDUCE service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "1",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-mr-scheduler",
+                  "display-name": "MapReduce",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "3",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-mr-scheduler-row1-col1",
+                      "display-name": "MapReduce Framework",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-mr-scheduler-row1-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-mr-scheduler-row1-col3",
+                      "row-index": "0",
+                      "column-index": "2",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-mr-scheduler-row2-col1",
+                      "display-name": "MapReduce AppMaster",
+                      "row-index": "1",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "3"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "mapred-site/mapreduce.map.memory.mb",
+          "subsection-name": "subsection-mr-scheduler-row1-col1"
+        },
+        {
+          "config": "mapred-site/mapreduce.reduce.memory.mb",
+          "subsection-name": "subsection-mr-scheduler-row1-col2"
+        },
+        {
+          "config": "mapred-site/yarn.app.mapreduce.am.resource.mb",
+          "subsection-name": "subsection-mr-scheduler-row2-col1"
+        },
+        {
+          "config": "mapred-site/mapreduce.task.io.sort.mb",
+          "subsection-name": "subsection-mr-scheduler-row1-col3"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "mapred-site/mapreduce.map.memory.mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "mapred-site/mapreduce.reduce.memory.mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "mapred-site/yarn.app.mapreduce.am.resource.mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "mapred-site/mapreduce.task.io.sort.mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/a62119ab/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes/theme.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes/theme.json
new file mode 100644
index 0000000..758cf0c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/YARN/themes/theme.json
@@ -0,0 +1,250 @@
+{
+  "name": "default",
+  "description": "Default theme for YARN service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "3",
+              "tab-rows": "2",
+              "sections": [
+                {
+                  "name": "section-nm-sizing",
+                  "display-name": "Memory",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-nm-sizing-col1",
+                      "display-name": "Node",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-nm-sizing-col2",
+                      "display-name": "Container",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-yarn-platform-features",
+                  "display-name": "YARN Features",
+                  "row-index": "0",
+                  "column-index": "2",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-yarn-platform-features-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-container-sizing",
+                  "display-name": "CPU",
+                  "row-index": "1",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-container-sizing-col1",
+                      "display-name": "Node",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-container-sizing-col2",
+                      "display-name": "Container",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "yarn-site/yarn.nodemanager.resource.memory-mb",
+          "subsection-name": "subsection-nm-sizing-col1"
+        },
+        {
+          "config": "yarn-site/yarn.scheduler.minimum-allocation-mb",
+          "subsection-name": "subsection-nm-sizing-col2"
+        },
+        {
+          "config": "yarn-site/yarn.scheduler.maximum-allocation-mb",
+          "subsection-name": "subsection-nm-sizing-col2"
+        },
+        {
+          "config": "yarn-site/yarn.node-labels.enabled",
+          "subsection-name": "subsection-yarn-platform-features-col1"
+        },
+        {
+          "config": "yarn-site/yarn.resourcemanager.scheduler.monitor.enable",
+          "subsection-name": "subsection-yarn-platform-features-col1"
+        },
+        {
+          "config": "capacity-scheduler/yarn.scheduler.capacity.resource-calculator",
+          "subsection-name": "subsection-container-sizing-col1"
+        },
+        {
+          "config": "yarn-env/yarn_cgroups_enabled",
+          "subsection-name": "subsection-container-sizing-col1"
+        },
+        {
+          "config": "yarn-site/yarn.nodemanager.resource.percentage-physical-cpu-limit",
+          "subsection-name": "subsection-container-sizing-col1"
+        },
+        {
+          "config": "yarn-site/yarn.nodemanager.resource.cpu-vcores",
+          "subsection-name": "subsection-container-sizing-col1"
+        },
+        {
+          "config": "yarn-site/yarn.scheduler.minimum-allocation-vcores",
+          "subsection-name": "subsection-container-sizing-col2"
+        },
+        {
+          "config": "yarn-site/yarn.scheduler.maximum-allocation-vcores",
+          "subsection-name": "subsection-container-sizing-col2"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "yarn-site/yarn.nodemanager.resource.memory-mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.nodemanager.resource.percentage-physical-cpu-limit",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.nodemanager.resource.cpu-vcores",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.scheduler.minimum-allocation-mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.scheduler.maximum-allocation-mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.scheduler.minimum-allocation-vcores",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.scheduler.maximum-allocation-vcores",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.node-labels.enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "yarn-env/yarn_cgroups_enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "yarn-site/yarn.resourcemanager.scheduler.monitor.enable",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "capacity-scheduler/yarn.scheduler.capacity.resource-calculator",
+        "widget": {
+          "type": "toggle"
+        }
+      }
+    ]
+  }
+}
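
Functionally, a theme's "placement" and "widgets" sections boil down to two lookups keyed by "config-type/property-name": one placing the property into a subsection, one choosing its widget. A small illustrative sketch (hypothetical class name, data copied from the theme above):

import java.util.Map;

public class ThemeLookupSketch {
    public static void main(String[] args) {
        Map<String, String> placement = Map.of(
            "yarn-site/yarn.nodemanager.resource.memory-mb", "subsection-nm-sizing-col1",
            "yarn-site/yarn.node-labels.enabled", "subsection-yarn-platform-features-col1");
        Map<String, String> widgetType = Map.of(
            "yarn-site/yarn.nodemanager.resource.memory-mb", "slider",
            "yarn-site/yarn.node-labels.enabled", "toggle");

        String config = "yarn-site/yarn.node-labels.enabled";
        // The UI renders the config in its subsection with its widget type.
        System.out.println(placement.get(config) + " -> " + widgetType.get(config));
    }
}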


[38/41] ambari git commit: AMBARI-18668: Workflow Designer View: Tooltip for Edit button shows Back (sangeetar)

Posted by jo...@apache.org.
AMBARI-18668: Workflow Designer View: Tooltip for Edit button shows Back (sangeetar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d56576d7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d56576d7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d56576d7

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: d56576d78a3e83f7ee5550e81531d4b13d8fceeb
Parents: 99afb8b
Author: Sangeeta Ravindran <sa...@apache.org>
Authored: Wed Dec 7 10:28:27 2016 -0800
Committer: Sangeeta Ravindran <sa...@apache.org>
Committed: Wed Dec 7 10:28:27 2016 -0800

----------------------------------------------------------------------
 .../src/main/resources/ui/app/templates/components/job-details.hbs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d56576d7/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-details.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-details.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-details.hbs
index 0a04d25..f17f89e 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-details.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-details.hbs
@@ -52,7 +52,7 @@
             <li {{action 'getJobDag'}} role="presentation"><a href="#jobDag" aria-controls="jobDag" role="tab" data-toggle="tab">Flow Graph</a></li>
             <li role="presentation" class="pull-right">
                 {{#link-to 'design' (query-params appPath=model.appPath) class="backto-designer" }}
-                    <button type="button" class="btn btn-success" title="Back" >
+                    <button type="button" class="btn btn-success" title="Edit Workflow" >
                        Edit Workflow
                     </button>
                 {{/link-to}}


[03/41] ambari git commit: AMBARI-19086. LDAP sync creates groups with Local type (Attila Doroszla via rlevas)

Posted by jo...@apache.org.
AMBARI-19086. LDAP sync creates groups with Local type (Attila Doroszla via rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a00ffffc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a00ffffc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a00ffffc

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: a00ffffc54a8063c1728d2f3ba24db04504864da
Parents: b5122e2
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Mon Dec 5 16:42:14 2016 -0500
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Mon Dec 5 16:42:14 2016 -0500

----------------------------------------------------------------------
 .../apache/ambari/server/orm/entities/GroupEntity.java    |  5 +++--
 .../ambari/server/security/authorization/Users.java       | 10 ++++------
 .../server/security/ldap/AmbariLdapDataPopulatorTest.java |  3 ++-
 3 files changed, 9 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a00ffffc/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/GroupEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/GroupEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/GroupEntity.java
index 58b2e5d..dc71b61 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/GroupEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/GroupEntity.java
@@ -101,7 +101,7 @@ public class GroupEntity {
     return ldapGroup == 0 ? Boolean.FALSE : Boolean.TRUE;
   }
 
-  public void setLdapGroup(Boolean ldapGroup) {
+  private void setLdapGroup(Boolean ldapGroup) {
     if (ldapGroup == null) {
       this.ldapGroup = null;
     } else {
@@ -113,8 +113,9 @@ public class GroupEntity {
     return groupType;
   }
 
-  public void setgroupType(GroupType groupType) {
+  public void setGroupType(GroupType groupType) {
     this.groupType = groupType;
+    setLdapGroup(groupType == GroupType.LDAP);
   }
 
   public Set<MemberEntity> getMemberEntities() {

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00ffffc/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
index 2cd538c..e69bbc9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/Users.java
@@ -49,10 +49,8 @@ import org.apache.ambari.server.orm.entities.PermissionEntity;
 import org.apache.ambari.server.orm.entities.PrincipalEntity;
 import org.apache.ambari.server.orm.entities.PrincipalTypeEntity;
 import org.apache.ambari.server.orm.entities.PrivilegeEntity;
-import org.apache.ambari.server.orm.entities.ResourceEntity;
 import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
 import org.apache.ambari.server.orm.entities.UserEntity;
-import org.apache.ambari.server.security.ClientSecurityType;
 import org.apache.ambari.server.security.ldap.LdapBatchDto;
 import org.apache.ambari.server.security.ldap.LdapUserGroupMemberDto;
 import org.apache.commons.lang.StringUtils;
@@ -277,7 +275,7 @@ public class Users {
   public synchronized void setGroupLdap(String groupName) throws AmbariException {
     GroupEntity groupEntity = groupDAO.findGroupByName(groupName);
     if (groupEntity != null) {
-      groupEntity.setLdapGroup(true);
+      groupEntity.setGroupType(GroupType.LDAP);
       groupDAO.merge(groupEntity);
     } else {
       throw new AmbariException("Group " + groupName + " doesn't exist");
@@ -435,7 +433,7 @@ public class Users {
     final GroupEntity groupEntity = new GroupEntity();
     groupEntity.setGroupName(groupName);
     groupEntity.setPrincipal(principalEntity);
-    groupEntity.setgroupType(groupType);
+    groupEntity.setGroupType(groupType);
 
     groupDAO.create(groupEntity);
   }
@@ -701,7 +699,7 @@ public class Users {
     final Set<GroupEntity> groupsToBecomeLdap = new HashSet<GroupEntity>();
     for (String groupName : batchInfo.getGroupsToBecomeLdap()) {
       final GroupEntity groupEntity = groupDAO.findGroupByName(groupName);
-      groupEntity.setLdapGroup(true);
+      groupEntity.setGroupType(GroupType.LDAP);
       allGroups.put(groupEntity.getGroupName(), groupEntity);
       groupsToBecomeLdap.add(groupEntity);
     }
@@ -737,7 +735,7 @@ public class Users {
       final GroupEntity groupEntity = new GroupEntity();
       groupEntity.setGroupName(groupName);
       groupEntity.setPrincipal(principalEntity);
-      groupEntity.setLdapGroup(true);
+      groupEntity.setGroupType(GroupType.LDAP);
 
       allGroups.put(groupEntity.getGroupName(), groupEntity);
       groupsToCreate.add(groupEntity);

http://git-wip-us.apache.org/repos/asf/ambari/blob/a00ffffc/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java
index 1866b12..2840e3d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/security/ldap/AmbariLdapDataPopulatorTest.java
@@ -38,6 +38,7 @@ import org.apache.ambari.server.orm.entities.PrivilegeEntity;
 import org.apache.ambari.server.orm.entities.UserEntity;
 import org.apache.ambari.server.security.authorization.AmbariLdapUtils;
 import org.apache.ambari.server.security.authorization.Group;
+import org.apache.ambari.server.security.authorization.GroupType;
 import org.apache.ambari.server.security.authorization.LdapServerProperties;
 import org.apache.ambari.server.security.authorization.User;
 import org.apache.ambari.server.security.authorization.Users;
@@ -1576,7 +1577,7 @@ public class AmbariLdapDataPopulatorTest {
     final GroupEntity ldapGroup = new GroupEntity();
     ldapGroup.setGroupId(1);
     ldapGroup.setGroupName("ldapGroup");
-    ldapGroup.setLdapGroup(true);
+    ldapGroup.setGroupType(GroupType.LDAP);
     ldapGroup.setMemberEntities(new HashSet<MemberEntity>());
 
     final User ldapUserWithoutGroup = createLdapUserWithoutGroup();

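For context, here is a minimal Python sketch of the invariant the GroupEntity change above enforces: the LDAP flag is derived from the group type instead of being set independently, so the two fields can no longer drift apart (the root cause of LDAP-synced groups showing up as Local). The Group/GroupType names below are illustrative stand-ins for the Java entity, not the actual implementation:

from enum import Enum

class GroupType(Enum):
    LOCAL = 0
    LDAP = 1

class Group:
    # Illustrative stand-in for the Java GroupEntity.
    def __init__(self, name, group_type=GroupType.LOCAL):
        self.name = name
        self._group_type = group_type
        self._ldap = (group_type == GroupType.LDAP)

    @property
    def group_type(self):
        return self._group_type

    @group_type.setter
    def group_type(self, value):
        # Mirrors setGroupType(): the ldap flag is always derived from
        # the type and can no longer be toggled on its own.
        self._group_type = value
        self._ldap = (value == GroupType.LDAP)

g = Group("admins")
g.group_type = GroupType.LDAP
assert g._ldap is True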

[16/41] ambari git commit: AMBARI-19101 : Merge AMS service check support for HA from trunk. (dsen, avijayan via avijayan)

Posted by jo...@apache.org.
AMBARI-19101 : Merge AMS service check support for HA from trunk. (dsen, avijayan via avijayan)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9a938e6c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9a938e6c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9a938e6c

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 9a938e6cc27811057895d495f700a8e874c69a10
Parents: 6418861
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Tue Dec 6 12:50:45 2016 -0800
Committer: Aravindan Vijayan <av...@hortonworks.com>
Committed: Tue Dec 6 12:50:54 2016 -0800

----------------------------------------------------------------------
 .../ambari_commons/ambari_metrics_helper.py     | 26 ++++++++++----------
 .../package/scripts/metrics_grafana_util.py     |  2 +-
 .../0.1.0/package/scripts/service_check.py      |  2 +-
 .../2.1/hooks/before-START/scripts/params.py    |  2 +-
 4 files changed, 16 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9a938e6c/ambari-common/src/main/python/ambari_commons/ambari_metrics_helper.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/ambari_commons/ambari_metrics_helper.py b/ambari-common/src/main/python/ambari_commons/ambari_metrics_helper.py
index f6f4068..bfc786c 100644
--- a/ambari-common/src/main/python/ambari_commons/ambari_metrics_helper.py
+++ b/ambari-common/src/main/python/ambari_commons/ambari_metrics_helper.py
@@ -29,15 +29,15 @@ def select_metric_collector_for_sink(sink_name):
   # TODO check '*' sink_name
 
   all_collectors_string = get_metric_collectors_from_properties_file(sink_name)
-  if all_collectors_string:
-    all_collectors_list = all_collectors_string.split(',')
-    return select_metric_collector_hosts_from_hostnames(all_collectors_list)
+  return select_metric_collector_hosts_from_hostnames(all_collectors_string)
+
+def select_metric_collector_hosts_from_hostnames(comma_separated_hosts):
+  if comma_separated_hosts:
+    hosts = comma_separated_hosts.split(',')
+    return get_random_host(hosts)
   else:
     return 'localhost'
 
-def select_metric_collector_hosts_from_hostnames(hosts):
-  return get_random_host(hosts)
-
 def get_random_host(hosts):
   return random.choice(hosts)
 
@@ -53,10 +53,10 @@ def load_properties_from_file(filepath, sep='=', comment_char='#'):
   props = {}
   with open(filepath, "rt") as f:
     for line in f:
-        l = line.strip()
-        if l and not l.startswith(comment_char):
-          key_value = l.split(sep)
-          key = key_value[0].strip()
-          value = sep.join(key_value[1:]).strip('" \t')
-          props[key] = value
-  return props
+      l = line.strip()
+      if l and not l.startswith(comment_char):
+        key_value = l.split(sep)
+        key = key_value[0].strip()
+        value = sep.join(key_value[1:]).strip('" \t')
+        props[key] = value
+  return props
\ No newline at end of file

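The reworked helper above moves the comma-splitting into select_metric_collector_hosts_from_hostnames, so callers can hand it the raw comma-separated host string. A self-contained sketch of that contract (the function name matches the helper; the fallback behaviour is taken from the diff):

import random

def select_metric_collector_hosts_from_hostnames(comma_separated_hosts):
    # Accepts the raw comma-separated host string; picks a random
    # collector, falling back to 'localhost' when the string is empty.
    if comma_separated_hosts:
        hosts = comma_separated_hosts.split(',')
        return random.choice(hosts)
    return 'localhost'

assert select_metric_collector_hosts_from_hostnames('') == 'localhost'
assert select_metric_collector_hosts_from_hostnames('c6401,c6402') in ('c6401', 'c6402')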
http://git-wip-us.apache.org/repos/asf/ambari/blob/9a938e6c/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
index 41bd2b4..75740f8 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
@@ -215,7 +215,7 @@ def create_ams_datasource():
  Create AMS datasource in Grafana; if it exists, make sure the collector url is accurate
   """
   Logger.info("Trying to find working metric collector")
-  results = execute_in_parallel(do_ams_collector_post, params.ams_collector_hosts, params)
+  results = execute_in_parallel(do_ams_collector_post, params.ams_collector_hosts.split(','), params)
   new_datasource_host = ""
 
   for host in params.ams_collector_hosts:

http://git-wip-us.apache.org/repos/asf/ambari/blob/9a938e6c/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py
index b4e9b8f..305fe73 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/service_check.py
@@ -144,7 +144,7 @@ class AMSServiceCheck(Script):
     Logger.info("Ambari Metrics service check was started.")
     env.set_params(params)
 
-    results = execute_in_parallel(self.service_check_for_single_host, params.ams_collector_hosts, params)
+    results = execute_in_parallel(self.service_check_for_single_host, params.ams_collector_hosts.split(','), params)
 
     for host in str(params.ams_collector_hosts).split(","):
       if host in results:

http://git-wip-us.apache.org/repos/asf/ambari/blob/9a938e6c/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-START/scripts/params.py
index a22eb90..1058c75 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-START/scripts/params.py
@@ -29,7 +29,7 @@ if has_metric_collector:
       'metrics_collector_vip_host' in config['configurations']['cluster-env']:
     metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
   else:
-    metric_collector_host = select_metric_collector_hosts_from_hostnames(ams_collector_hosts.split(","))
+    metric_collector_host = select_metric_collector_hosts_from_hostnames(ams_collector_hosts)
   if 'cluster-env' in config['configurations'] and \
       'metrics_collector_vip_port' in config['configurations']['cluster-env']:
     metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']


[30/41] ambari git commit: AMBARI-19113. Add timeout for LogSearch and Infra Solr metainfo.xml (oleewere)

Posted by jo...@apache.org.
AMBARI-19113. Add timeout for LogSearch and Infra Solr metainfo.xml (oleewere)

Change-Id: I8cd2e50def25febcc6c8d08a7f8f04625c69c7be


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/69cdaa77
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/69cdaa77
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/69cdaa77

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 69cdaa775f8c0990efa052c806d41db02c3acc52
Parents: 49d0895
Author: oleewere <ol...@gmail.com>
Authored: Wed Dec 7 12:20:45 2016 +0100
Committer: oleewere <ol...@gmail.com>
Committed: Wed Dec 7 12:28:59 2016 +0100

----------------------------------------------------------------------
 .../resources/common-services/AMBARI_INFRA/0.1.0/metainfo.xml     | 1 +
 .../AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py        | 3 ++-
 .../main/resources/common-services/LOGSEARCH/0.5.0/metainfo.xml   | 1 +
 .../LOGSEARCH/0.5.0/package/scripts/setup_logsearch.py            | 3 ++-
 .../src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py    | 2 +-
 .../src/test/python/stacks/2.4/LOGSEARCH/test_logsearch.py        | 2 +-
 6 files changed, 8 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/69cdaa77/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/metainfo.xml b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/metainfo.xml
index 8e1051b..b94812b 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/metainfo.xml
@@ -34,6 +34,7 @@
           <commandScript>
             <script>scripts/infra_solr.py</script>
             <scriptType>PYTHON</scriptType>
+            <timeout>1800</timeout>
           </commandScript>
           <logs>
             <log>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69cdaa77/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py
index 41cb504..5143cbb 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py
@@ -112,4 +112,5 @@ def create_ambari_solr_znode():
   solr_cloud_util.create_znode(
     zookeeper_quorum=params.zookeeper_quorum,
     solr_znode=params.infra_solr_znode,
-    java64_home=params.java64_home)
\ No newline at end of file
+    java64_home=params.java64_home,
+    retry=30, interval=5)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/69cdaa77/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/metainfo.xml b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/metainfo.xml
index 253294e..3b45bfe 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/metainfo.xml
@@ -36,6 +36,7 @@
           <commandScript>
             <script>scripts/logsearch.py</script>
             <scriptType>PYTHON</scriptType>
+            <timeout>1800</timeout>
           </commandScript>
           <logs>
             <log>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69cdaa77/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logsearch.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logsearch.py b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logsearch.py
index 6c1a936..351126b 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logsearch.py
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/setup_logsearch.py
@@ -136,4 +136,5 @@ def check_znode():
   solr_cloud_util.check_znode(
     zookeeper_quorum=params.zookeeper_quorum,
     solr_znode=params.infra_solr_znode,
-    java64_home=params.java64_home)
+    java64_home=params.java64_home,
+    retry=30, interval=5)

http://git-wip-us.apache.org/repos/asf/ambari/blob/69cdaa77/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py b/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py
index 005283f..6af463c 100644
--- a/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py
+++ b/ambari-server/src/test/python/stacks/2.4/AMBARI_INFRA/test_infra_solr.py
@@ -95,7 +95,7 @@ class TestInfraSolr(RMFTestCase):
                                 content = InlineTemplate(self.getConfig()['configurations']['infra-solr-log4j']['content'])
       )
 
-      self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --create-znode --retry 5 --interval 10')
+      self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --create-znode --retry 30 --interval 5')
       self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --cluster-prop --property-name urlScheme --property-value http')
       self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --setup-kerberos-plugin')
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/69cdaa77/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logsearch.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logsearch.py b/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logsearch.py
index 6b4fab8..df54cd8 100644
--- a/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logsearch.py
+++ b/ambari-server/src/test/python/stacks/2.4/LOGSEARCH/test_logsearch.py
@@ -129,7 +129,7 @@ class TestLogSearch(RMFTestCase):
                               content = InlineTemplate(self.getConfig()['configurations']['logsearch-audit_logs-solrconfig']['content'])
                               )
 
-    self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --check-znode --retry 5 --interval 10')
+    self.assertResourceCalled('Execute', 'ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --check-znode --retry 30 --interval 5')
     self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --download-config --config-dir /tmp/solr_config_hadoop_logs_0.[0-9]* --config-set hadoop_logs --retry 30 --interval 5')
     self.assertResourceCalledRegexp('^File$', '^/tmp/solr_config_hadoop_logs_0.[0-9]*',
                                     content=InlineTemplate(self.getConfig()['configurations']['logsearch-service_logs-solrconfig']['content']),

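The retry=30, interval=5 arguments above give the znode setup on the order of 150 seconds of retries, which is why the commandScript timeout is raised to 1800. A rough sketch of the retry loop such a call implies (solr_cloud_util's actual implementation may differ):

import time

def with_retry(operation, retry=30, interval=5):
    # Call `operation` up to `retry` times, sleeping `interval` seconds
    # between attempts; the final failure is re-raised.
    for attempt in range(1, retry + 1):
        try:
            return operation()
        except Exception:
            if attempt == retry:
                raise
            time.sleep(interval)

# hypothetical usage: with_retry(lambda: create_znode(zk, path), retry=30, interval=5)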

[05/41] ambari git commit: AMBARI-19088 : Make sure AMS in embedded mode cannot have more than 1 deployed instance. (avijayan)

Posted by jo...@apache.org.
AMBARI-19088 : Make sure AMS in embedded mode cannot have more than 1 deployed instance. (avijayan)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c711850a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c711850a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c711850a

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: c711850a8e53d3c838a4f7c5b9b81f7b133afbdf
Parents: edf8ab2
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Mon Dec 5 14:30:16 2016 -0800
Committer: Aravindan Vijayan <av...@hortonworks.com>
Committed: Mon Dec 5 14:30:16 2016 -0800

----------------------------------------------------------------------
 .../0.1.0/package/scripts/metrics_collector.py              | 2 ++
 .../AMBARI_METRICS/0.1.0/package/scripts/params.py          | 9 +++++++--
 2 files changed, 9 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c711850a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py
index c26eafc..7073de6 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_collector.py
@@ -38,6 +38,8 @@ class AmsCollector(Script):
   def configure(self, env, action = None):
     import params
     env.set_params(params)
+    if action == 'start' and params.embedded_mode_multiple_instances:
+      raise Fail("AMS in embedded mode cannot have more than 1 instance. Delete all but 1 instance or switch to Distributed mode.")
     hbase('master', action)
     hbase('regionserver', action)
     ams(name='collector')

http://git-wip-us.apache.org/repos/asf/ambari/blob/c711850a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
index 4bda033..529cf4c 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
@@ -55,10 +55,16 @@ pass
 
 #AMBARI_METRICS data
 ams_pid_dir = status_params.ams_collector_pid_dir
-
+is_ams_distributed = config['configurations']['ams-site']['timeline.metrics.service.operation.mode'] == 'distributed'
 ams_collector_script = "/usr/sbin/ambari-metrics-collector"
 ams_collector_pid_dir = status_params.ams_collector_pid_dir
 ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+ams_collector_list = default("/clusterHostInfo/metrics_collector_hosts", [])
+embedded_mode_multiple_instances = False
+
+if not is_ams_distributed and len(ams_collector_list) > 1:
+  embedded_mode_multiple_instances = True
+
 failover_strategy_blacklisted_interval_seconds = default("/configurations/ams-env/failover_strategy_blacklisted_interval", "600")
 failover_strategy = default("/configurations/ams-site/failover.strategy", "round-robin")
 if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
@@ -180,7 +186,6 @@ hbase_pid_dir = status_params.hbase_pid_dir
 
 is_hbase_distributed = config['configurations']['ams-hbase-site']['hbase.cluster.distributed']
 is_local_fs_rootdir = hbase_root_dir.startswith('file://')
-is_ams_distributed = config['configurations']['ams-site']['timeline.metrics.service.operation.mode'] == 'distributed'
 
 # security is disabled for embedded mode, when HBase is backed by file
 security_enabled = False if not is_hbase_distributed else config['configurations']['cluster-env']['security_enabled']

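The params.py change above reduces to one predicate: embedded (non-distributed) operation mode combined with more than one deployed collector is an invalid layout, and the start action now fails fast on it. A self-contained sketch of that check (the argument shapes are illustrative):

def embedded_mode_multiple_instances(operation_mode, collector_hosts):
    # True when AMS runs embedded (anything but 'distributed') while
    # more than one collector host is deployed.
    is_distributed = (operation_mode == 'distributed')
    return not is_distributed and len(collector_hosts) > 1

assert embedded_mode_multiple_instances('embedded', ['h1', 'h2'])
assert not embedded_mode_multiple_instances('distributed', ['h1', 'h2'])
assert not embedded_mode_multiple_instances('embedded', ['h1'])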

[13/41] ambari git commit: AMBARI-19106. Kerberos wizard is broken if the stack doesn't contain Ranger (alexantonenko)

Posted by jo...@apache.org.
AMBARI-19106. Kerberos wizard is broken if the stack doesn't contain Ranger (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9b46fd2a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9b46fd2a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9b46fd2a

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 9b46fd2acd6a5d1bd8ce474315fb318f1cfe6a3a
Parents: 0f29751
Author: Alex Antonenko <hi...@gmail.com>
Authored: Tue Dec 6 19:18:34 2016 +0200
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Tue Dec 6 19:18:34 2016 +0200

----------------------------------------------------------------------
 ambari-web/app/views/common/controls_view.js    |  3 +-
 .../test/views/common/controls_view_test.js     | 74 +++++++++++++++++++-
 2 files changed, 75 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9b46fd2a/ambari-web/app/views/common/controls_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/controls_view.js b/ambari-web/app/views/common/controls_view.js
index dd475b6..bcb90d8 100644
--- a/ambari-web/app/views/common/controls_view.js
+++ b/ambari-web/app/views/common/controls_view.js
@@ -1048,11 +1048,12 @@ App.CheckDBConnectionView = Ember.View.extend({
   isBtnDisabled: Em.computed.or('!isValidationPassed', 'isConnecting'),
   /** @property {object} requiredProperties - properties that necessary for database connection **/
   requiredProperties: function() {
+    var ranger = App.StackService.find().findProperty('serviceName', 'RANGER');
     var propertiesMap = {
       OOZIE: ['oozie.db.schema.name', 'oozie.service.JPAService.jdbc.username', 'oozie.service.JPAService.jdbc.password', 'oozie.service.JPAService.jdbc.driver', 'oozie.service.JPAService.jdbc.url'],
       HIVE: ['ambari.hive.db.schema.name', 'javax.jdo.option.ConnectionUserName', 'javax.jdo.option.ConnectionPassword', 'javax.jdo.option.ConnectionDriverName', 'javax.jdo.option.ConnectionURL'],
       KERBEROS: ['kdc_hosts'],
-      RANGER: App.StackService.find('RANGER').compareCurrentVersion('0.5') > -1 ?
+      RANGER: ranger && App.StackService.find().findProperty('serviceName', 'RANGER').compareCurrentVersion('0.5') > -1 ?
           ['db_user', 'db_password', 'db_name', 'ranger.jpa.jdbc.url', 'ranger.jpa.jdbc.driver'] :
           ['db_user', 'db_password', 'db_name', 'ranger_jdbc_connection_url', 'ranger_jdbc_driver']
     };

http://git-wip-us.apache.org/repos/asf/ambari/blob/9b46fd2a/ambari-web/test/views/common/controls_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/common/controls_view_test.js b/ambari-web/test/views/common/controls_view_test.js
index c5c5338..3ff5253 100644
--- a/ambari-web/test/views/common/controls_view_test.js
+++ b/ambari-web/test/views/common/controls_view_test.js
@@ -651,7 +651,6 @@ describe('App.CheckDBConnectionView', function () {
         expect(view.get('masterHostName')).to.equal(item.value);
       });
     });
-
   });
 
   describe('#setResponseStatus', function () {
@@ -836,6 +835,79 @@ describe('App.CheckDBConnectionView', function () {
       expect(args[0]).exists;
     });
   });
+
+  describe('#requiredProperties', function() {
+    var cases;
+    beforeEach(function() {
+      this.stackServiceStub = sinon.stub(App.StackService, 'find');
+    });
+    afterEach(function() {
+      this.stackServiceStub.restore();
+    });
+
+    cases = [
+      {
+        stackServices: [
+          {name: 'OOZIE', version: '1.0.0'}
+        ],
+        parentViewServiceName: 'OOZIE',
+        e: ['oozie.db.schema.name', 'oozie.service.JPAService.jdbc.username', 'oozie.service.JPAService.jdbc.password', 'oozie.service.JPAService.jdbc.driver', 'oozie.service.JPAService.jdbc.url'],
+        m: 'should return Oozie specific properties'
+      },
+      {
+        stackServices: [
+          {name: 'HIVE', version: '1.0.0'}
+        ],
+        parentViewServiceName: 'HIVE',
+        e: ['ambari.hive.db.schema.name', 'javax.jdo.option.ConnectionUserName', 'javax.jdo.option.ConnectionPassword', 'javax.jdo.option.ConnectionDriverName', 'javax.jdo.option.ConnectionURL'],
+        m: 'should return Hive specific properties'
+      },
+      {
+        stackServices: [
+          {name: 'KERBEROS', version: '1.0.0'}
+        ],
+        parentViewServiceName: 'KERBEROS',
+        e: ['kdc_hosts'],
+        m: 'should return Kerberos specific properties'
+      },
+      {
+        stackServices: [
+          {name: 'RANGER', version: '0.4.9'}
+        ],
+        parentViewServiceName: 'RANGER',
+        e: ['db_user', 'db_password', 'db_name', 'ranger_jdbc_connection_url', 'ranger_jdbc_driver'],
+        m: 'should return specific properties for Ranger when its version < 0.5'
+      },
+      {
+        stackServices: [
+          {name: 'RANGER', version: '1.0.0'}
+        ],
+        parentViewServiceName: 'RANGER',
+        e: ['db_user', 'db_password', 'db_name', 'ranger.jpa.jdbc.url', 'ranger.jpa.jdbc.driver'],
+        m: 'should return specific properties for Ranger when its version > 0.5'
+      }
+    ];
+
+    cases.forEach(function(test) {
+      it(test.m, function() {
+        this.stackServiceStub.returns(test.stackServices.map(function(service) {
+          return Em.Object.create({
+            serviceName: service.name,
+            serviceVersion: service.version,
+            compareCurrentVersion: App.StackService.proto().compareCurrentVersion
+          });
+        }));
+        var view = App.CheckDBConnectionView.create({
+          parentView: {
+            service: {
+              serviceName: test.parentViewServiceName
+            }
+          }
+        });
+        expect(view.get('requiredProperties')).to.be.eql(test.e);
+      });
+    });
+  });
 });
 
 describe('App.BaseUrlTextField', function () {

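The controls_view.js fix above is a plain guard: look the RANGER stack service up first and only call compareCurrentVersion on it when it exists, since dereferencing a missing service is what broke the Kerberos wizard. The same pattern, sketched in Python for clarity (the names and the naive string version compare are illustrative, not the Ember API):

def required_ranger_properties(stack_services):
    # `stack_services` maps service name -> version string.
    ranger_version = stack_services.get('RANGER')
    if ranger_version is None:
        return []  # Ranger absent: nothing to require, nothing to crash on
    # naive lexical compare; the real view uses compareCurrentVersion('0.5')
    if ranger_version >= '0.5':
        return ['db_user', 'db_password', 'db_name',
                'ranger.jpa.jdbc.url', 'ranger.jpa.jdbc.driver']
    return ['db_user', 'db_password', 'db_name',
            'ranger_jdbc_connection_url', 'ranger_jdbc_driver']

assert required_ranger_properties({}) == []
assert 'ranger.jpa.jdbc.url' in required_ranger_properties({'RANGER': '1.0.0'})
assert 'ranger_jdbc_driver' in required_ranger_properties({'RANGER': '0.4.9'})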

[21/41] ambari git commit: AMBARI-19093. HDP 3.0 support for HDFS with configs, kerberos, widgets, metrics, quicklinks, and themes (alejandro)

Posted by jo...@apache.org.
AMBARI-19093. HDP 3.0 support for HDFS with configs, kerberos, widgets, metrics, quicklinks, and themes (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2a710b35
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2a710b35
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2a710b35

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 2a710b3587965c09e7b034ea79a7eca31fc35c25
Parents: aed9839
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Mon Dec 5 15:00:07 2016 -0800
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Tue Dec 6 16:53:31 2016 -0800

----------------------------------------------------------------------
 .../services/HDFS/configuration/core-site.xml   |  56 ++
 .../services/HDFS/configuration/hadoop-env.xml  | 200 ++++++
 .../services/HDFS/configuration/hdfs-log4j.xml  | 226 +++++++
 .../services/HDFS/configuration/hdfs-site.xml   | 153 +++++
 .../HDFS/configuration/ranger-hdfs-audit.xml    | 217 +++++++
 .../ranger-hdfs-plugin-properties.xml           |  98 +++
 .../configuration/ranger-hdfs-policymgr-ssl.xml |  67 ++
 .../HDFS/configuration/ranger-hdfs-security.xml |  65 ++
 .../services/HDFS/configuration/widgets.json    | 649 +++++++++++++++++++
 .../stacks/HDP/3.0/services/HDFS/kerberos.json  | 246 +++++++
 .../stacks/HDP/3.0/services/HDFS/metainfo.xml   | 190 ++++++
 .../services/HDFS/quicklinks/quicklinks.json    |  80 +++
 .../HDP/3.0/services/HDFS/themes/theme.json     | 179 +++++
 13 files changed, 2426 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2a710b35/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/core-site.xml
new file mode 100644
index 0000000..9dcf561
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/core-site.xml
@@ -0,0 +1,56 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+ 
+        http://www.apache.org/licenses/LICENSE-2.0
+ 
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+<!-- Put site-specific property overrides in this file. -->
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>hadoop.http.authentication.simple.anonymous.allowed</name>
+    <value>true</value>
+    <description>
+      Indicates if anonymous requests are allowed when using &apos;simple&apos; authentication.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop.security.key.provider.path</name>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hadoop-env</type>
+        <name>keyserver_host</name>
+      </property>
+      <property>
+        <type>hadoop-env</type>
+        <name>keyserver_port</name>
+      </property>
+      <property>
+        <type>kms-env</type>
+        <name>kms_port</name>
+      </property>
+      <property>
+        <type>ranger-kms-site</type>
+        <name>ranger.service.https.attrib.ssl.enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

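Stack configuration files like the core-site.xml above are plain XML: each <property> carries a name, a value, a description, and optional metadata such as <depends-on> and <on-ambari-upgrade>. A quick illustrative sketch of pulling the name/value pairs out of such a file (metadata elements are simply ignored here):

import xml.etree.ElementTree as ET

def read_stack_properties(path):
    # Returns {name: value} for every <property> element in an
    # Ambari stack configuration XML; a missing <value> becomes ''.
    root = ET.parse(path).getroot()
    return {prop.findtext('name'): (prop.findtext('value') or '')
            for prop in root.findall('property')}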
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a710b35/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..8697740
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hadoop-env.xml
@@ -0,0 +1,200 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>keyserver_host</name>
+    <value> </value>
+    <display-name>Key Server Host</display-name>
+    <description>Hostnames where Key Management Server is installed</description>
+    <value-attributes>
+      <type>string</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>keyserver_port</name>
+    <value/>
+    <display-name>Key Server Port</display-name>
+    <description>Port number where Key Management Server is available</description>
+    <value-attributes>
+      <type>int</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.3 -->
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>hadoop-env template</display-name>
+    <description>This is the jinja template for hadoop-env.sh file</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hadoop Configuration Directory
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+
+{% if java_version &lt; 8 %}
+SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
+export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
+
+{% else %}
+SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
+export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+{% endif %}
+
+HADOOP_NFS3_OPTS="-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Add database libraries
+JAVA_JDBC_LIBS=""
+if [ -d "/usr/share/java" ]; then
+  for jarFile in `ls /usr/share/java | grep -E "(mysql|ojdbc|postgresql|sqljdbc)" 2&gt;/dev/null`
+  do
+    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+  done
+fi
+
+# Add libraries to the hadoop classpath - some may not need a colon as they already include it
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+# Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}
+
+export HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"
+
+
+# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin.
+# Makes sense to fix only when running DN as root
+if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$HADOOP_SECURE_DN_USER" ]; then
+  {% if is_datanode_max_locked_memory_set %}
+  ulimit -l {{datanode_max_locked_memory}}
+  {% endif %}
+  ulimit -n {{hdfs_user_nofile_limit}}
+fi
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>nfsgateway_heapsize</name>
+    <display-name>NFSGateway maximum Java heap size</display-name>
+    <value>1024</value>
+    <description>Maximum Java heap size for NFSGateway (Java option -Xmx)</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a710b35/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-log4j.xml
new file mode 100644
index 0000000..215a6ee
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-log4j.xml
@@ -0,0 +1,226 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>content</name>
+    <display-name>hdfs-log4j template</display-name>
+    <description>Custom log4j.properties</description>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# Define some default values that can be overridden by system properties
+# To change daemon root logger use hadoop_root_logger in hadoop-env
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# NameNode metrics logging.
+# The default is to retain two namenode-metrics.log files up to 64MB each.
+#
+namenode.metrics.logger=INFO,NullAppender
+log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
+log4j.additivity.NameNodeMetricsLog=false
+log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
+log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
+log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
+log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
+log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+log4j.appender.RFA.MaxFileSize=256MB
+log4j.appender.RFA.MaxBackupIndex=10
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+
+#
+# HDFS block state change log from block manager
+#
+# Uncomment the following to suppress normal block state change
+# messages from BlockManager in NameNode.
+#log4j.logger.BlockStateChange=WARN
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a710b35/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 0000000..ac141d1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,153 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- Put site-specific property overrides in this file. -->
+<configuration supports_final="true">
+  <!-- These configs were inherited from HDP 2.1 -->
+  <property>
+    <name>dfs.namenode.audit.log.async</name>
+    <value>true</value>
+    <description>Whether to enable the asynchronous audit log</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.namenode.fslock.fair</name>
+    <value>false</value>
+    <description>Whether fsLock is fair</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>dfs.namenode.startup.delay.block.deletion.sec</name>
+    <value>3600</value>
+    <description>
+      The delay in seconds for which block deletion is paused after
+      NameNode startup. Upstream HDFS disables the delay by default.
+      When a directory containing a large number of directories and files is
+      deleted, a one-hour delay is suggested to give the administrator enough
+      time to notice the large number of pending deletion blocks and take
+      corrective action.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.journalnode.edits.dir</name>
+    <value>/hadoop/hdfs/journalnode</value>
+    <description>The path where the JournalNode daemon will store its local state. </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.client.retry.policy.enabled</name>
+    <value>false</value>
+    <description>Enables HDFS client retry in the event of a NameNode failure.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.content-summary.limit</name>
+    <value>5000</value>
+    <description>DFS content summary limit.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.encryption.key.provider.uri</name>
+    <description>
+      The KeyProvider to use when interacting with encryption keys used
+      when reading and writing to an encryption zone.
+    </description>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hadoop-env</type>
+        <name>keyserver_host</name>
+      </property>
+      <property>
+        <type>hadoop-env</type>
+        <name>keyserver_port</name>
+      </property>
+      <property>
+        <type>kms-env</type>
+        <name>kms_port</name>
+      </property>
+      <property>
+        <type>ranger-kms-site</type>
+        <name>ranger.service.https.attrib.ssl.enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.3 -->
+  <property>
+    <name>nfs.file.dump.dir</name>
+    <value>/tmp/.hdfs-nfs</value>
+    <display-name>NFSGateway dump directory</display-name>
+    <description>
+      This directory is used to temporarily save out-of-order writes before
+      writing to HDFS. For each file, the out-of-order writes are dumped once
+      they accumulate to exceed a certain threshold (e.g., 1 MB) in memory.
+      Make sure the directory has enough space.
+    </description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>nfs.exports.allowed.hosts</name>
+    <value>* rw</value>
+    <description>
+      By default, the export can be mounted by any client. To better control access,
+      users can update this property. The value string contains a machine name and an access privilege,
+      separated by whitespace characters. The machine name can be a single host, a wildcard, or an IPv4
+      network. The access privilege uses rw or ro to specify read-write or read-only access of the machines
+      to the exports. If the access privilege is not provided, the default is read-only. Entries are separated
+      by &quot;;&quot;. For example: &quot;192.168.0.0/22 rw ; host*.example.com ; host1.test.org ro;&quot;.
+    </description>
+    <display-name>Allowed hosts</display-name>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.encrypt.data.transfer.cipher.suites</name>
+    <value>AES/CTR/NoPadding</value>
+    <description>
+      This value may be either undefined or AES/CTR/NoPadding. If defined, then 
+      dfs.encrypt.data.transfer uses the specified cipher suite for data encryption. 
+      If not defined, then only the algorithm specified in dfs.encrypt.data.transfer.algorithm 
+      is used. By default, the property is not defined.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.namenode.inode.attributes.provider.class</name>
+    <description>Enable ranger hdfs plugin</description>
+    <depends-on>
+      <property>
+        <type>ranger-hdfs-plugin-properties</type>
+        <name>ranger-hdfs-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
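
The nfs.exports.allowed.hosts grammar described above (entries separated by
";", each a machine pattern optionally followed by rw or ro, defaulting to
read-only) can be illustrated with a small parser. This is only a sketch of
the documented format, not the NFS gateway's actual implementation:

# Sketch of the documented nfs.exports.allowed.hosts grammar.
def parse_nfs_exports(value):
    exports = []
    for entry in value.split(';'):
        parts = entry.split()
        if not parts:
            continue  # skip empty entries, e.g. after a trailing ';'
        machine = parts[0]
        privilege = parts[1] if len(parts) > 1 else 'ro'  # default is read-only
        exports.append((machine, privilege))
    return exports

print(parse_nfs_exports("192.168.0.0/22 rw ; host*.example.com ; host1.test.org ro;"))
# [('192.168.0.0/22', 'rw'), ('host*.example.com', 'ro'), ('host1.test.org', 'ro')]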

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a710b35/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-audit.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-audit.xml
new file mode 100644
index 0000000..fd41817
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-audit.xml
@@ -0,0 +1,217 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <!-- These configs were inherited from HDP 2.3 -->
+  <property>
+    <name>xasecure.audit.is.enabled</name>
+    <value>true</value>
+    <description>Is Audit enabled?</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db</name>
+    <value>false</value>
+    <display-name>Audit to DB</display-name>
+    <description>Is Audit to DB enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.db</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.jdbc.url</name>
+    <value>{{audit_jdbc_url}}</value>
+    <description>Audit DB JDBC URL</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.user</name>
+    <value>{{xa_audit_db_user}}</value>
+    <description>Audit DB JDBC User</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.password</name>
+    <value>crypted</value>
+    <property-type>PASSWORD</property-type>
+    <description>Audit DB JDBC Password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.jdbc.driver</name>
+    <value>{{jdbc_driver}}</value>
+    <description>Audit DB JDBC Driver</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.credential.provider.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>Credential file store</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.batch.filespool.dir</name>
+    <value>/var/log/hadoop/hdfs/audit/db/spool</value>
+    <description>/var/log/hadoop/hdfs/audit/db/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs</name>
+    <value>true</value>
+    <display-name>Audit to HDFS</display-name>
+    <description>Is Audit to HDFS enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.dir</name>
+    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>HDFS folder to write audit logs to; make sure the service user has the required permissions</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
+    <value>/var/log/hadoop/hdfs/audit/hdfs/spool</value>
+    <description>/var/log/hadoop/hdfs/audit/hdfs/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr</name>
+    <value>false</value>
+    <display-name>Audit to SOLR</display-name>
+    <description>Is Solr audit enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.solr</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.urls</name>
+    <value/>
+    <description>Solr URL</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.urls</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.zookeepers</name>
+    <value>NONE</value>
+    <description>Solr Zookeeper string</description>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.zookeepers</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
+    <value>/var/log/hadoop/hdfs/audit/solr/spool</value>
+    <description>/var/log/hadoop/hdfs/audit/solr/spool</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.provider.summary.enabled</name>
+    <value>false</value>
+    <display-name>Audit provider summary enabled</display-name>
+    <description>Enable Summary audit?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- These configs are deleted in HDP 2.5. -->
+  <property>
+    <name>xasecure.audit.destination.db</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.jdbc.url</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.user</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.password</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.jdbc.driver</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.credential.provider.file</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.db.batch.filespool.dir</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>
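
Values such as {{audit_jdbc_url}}, {{credential_file}}, and {{xa_audit_db_user}}
are template placeholders that Ambari fills in from the stack's parameters at
deploy time. A minimal sketch of that substitution, assuming a plain dict of
hypothetical parameter values (the real rendering is done by Ambari's
resource-management templating, not this function):

import re

# Hypothetical parameter values; the real ones come from the stack's params.
params = {
    'audit_jdbc_url': 'jdbc:mysql://db.example.com/ranger_audit',
    'credential_file': '/etc/ranger/hdfs/cred.jceks',
}

def render(template, params):
    # Replace each {{name}} with its value from params.
    return re.sub(r'\{\{(\w+)\}\}', lambda m: params[m.group(1)], template)

print(render('jceks://file{{credential_file}}', params))
# jceks://file/etc/ranger/hdfs/cred.jceks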

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a710b35/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
new file mode 100644
index 0000000..b31742c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
@@ -0,0 +1,98 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <!-- These configs were inherited from HDP 2.2 -->
+  <property>
+    <name>policy_user</name>
+    <value>ambari-qa</value>
+    <display-name>Policy user for HDFS</display-name>
+    <description>This user must be a system user and must also be present in the
+      Ranger admin portal</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.rpc.protection</name>
+    <value/>
+    <description>Used for repository creation on ranger admin
+    </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>common.name.for.certificate</name>
+    <value/>
+    <description>Common name for the certificate; this value should match what is specified in the repo within Ranger admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger-hdfs-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Enable Ranger for HDFS</display-name>
+    <description>Enable ranger hdfs plugin</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>ranger-hdfs-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>hadoop</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on ranger admin
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_PASSWORD</name>
+    <value>hadoop</value>
+    <display-name>Ranger repository config password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Used for repository creation on ranger admin
+    </description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- These configs were inherited from HDP 2.5 -->
+  <property>
+    <name>hadoop.rpc.protection</name>
+    <value>authentication</value>
+    <description>Used for repository creation on ranger admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false" />
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a710b35/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml
new file mode 100644
index 0000000..1bc83df
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml
@@ -0,0 +1,67 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <!-- These configs were inherited from HDP 2.3 -->
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks</value>
+    <description>Java Keystore files</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <description>password for keystore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks</value>
+    <description>java truststore file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <description>java truststore password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java keystore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java truststore credential file</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a710b35/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-security.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-security.xml
new file mode 100644
index 0000000..1b0a821
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/ranger-hdfs-security.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <!-- These configs were inherited from HDP 2.3 -->
+  <property>
+    <name>ranger.plugin.hdfs.service.name</name>
+    <value>{{repo_name}}</value>
+    <description>Name of the Ranger service containing Hdfs policies</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hdfs.policy.source.impl</name>
+    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
+    <description>Class to retrieve policies from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hdfs.policy.rest.url</name>
+    <value>{{policymgr_mgr_url}}</value>
+    <description>URL to Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hdfs.policy.rest.ssl.config.file</name>
+    <value>/etc/hadoop/conf/ranger-policymgr-ssl.xml</value>
+    <description>Path to the file containing SSL details to contact Ranger Admin</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hdfs.policy.pollIntervalMs</name>
+    <value>30000</value>
+    <description>How often (in milliseconds) to poll for changes in policies.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hdfs.policy.cache.dir</name>
+    <value>/etc/ranger/{{repo_name}}/policycache</value>
+    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>xasecure.add-hadoop-authorization</name>
+    <value>true</value>
+    <description>Enable/Disable the default hadoop authorization (based on rwxrwxrwx permission on the resource) if Ranger Authorization fails.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a710b35/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/widgets.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/widgets.json
new file mode 100644
index 0000000..4a645b0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/configuration/widgets.json
@@ -0,0 +1,649 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_hdfs_dashboard",
+      "display_name": "Standard HDFS Dashboard",
+      "section_name": "HDFS_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "NameNode GC count",
+          "description": "Count of total garbage collections and count of major type garbage collections of the JVM.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.GcCount._rate",
+              "metric_path": "metrics/jvm/gcCount._rate",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate",
+              "metric_path": "metrics/jvm/GcCountConcurrentMarkSweep._rate",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "GC total count",
+              "value": "${jvm.JvmMetrics.GcCount._rate}"
+            },
+            {
+              "name": "GC count of type major collection",
+              "value": "${jvm.JvmMetrics.GcCountConcurrentMarkSweep._rate}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode GC time",
+          "description": "Total time taken by major type garbage collections in milliseconds.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate",
+              "metric_path": "metrics/jvm/GcTimeMillisConcurrentMarkSweep._rate",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "GC time in major collection",
+              "value": "${jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep._rate}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NN Connection Load",
+          "description": "Number of open RPC connections being managed by NameNode.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "rpc.rpc.client.NumOpenConnections",
+              "metric_path": "metrics/rpc/client/NumOpenConnections",
+              "category": "",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.datanode.NumOpenConnections",
+              "metric_path": "metrics/rpc/datanode/NumOpenConnections",
+              "category": "",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Open Client Connections",
+              "value": "${rpc.rpc.client.NumOpenConnections}"
+            },
+            {
+              "name": "Open Datanode Connections",
+              "value": "${rpc.rpc.datanode.NumOpenConnections}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode Heap",
+          "description": "Heap memory committed and Heap memory used with respect to time.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.MemHeapCommittedM",
+              "metric_path": "metrics/jvm/memHeapCommittedM",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "jvm.JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "JVM heap committed",
+              "value": "${jvm.JvmMetrics.MemHeapCommittedM}"
+            },
+            {
+              "name": "JVM heap used",
+              "value": "${jvm.JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode Host Load",
+          "description": "Percentage of CPU and Memory resources being consumed on NameNode host.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "cpu_system",
+              "metric_path": "metrics/cpu/cpu_system",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_user",
+              "metric_path": "metrics/cpu/cpu_user",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_nice",
+              "metric_path": "metrics/cpu/cpu_nice",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_idle",
+              "metric_path": "metrics/cpu/cpu_idle",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "CPU utilization",
+              "value": "${((cpu_system + cpu_user + cpu_nice)/(cpu_system + cpu_user + cpu_nice + cpu_idle + cpu_wio)) * 100}"
+            },
+            {
+              "name": "Memory utilization",
+              "value": "${((mem_total - mem_free)/mem_total) * 100}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "%"
+          }
+        },
+        {
+          "widget_name": "NameNode RPC",
+          "description": "Compares the average time spent for RPC request in a queue and RPC request being processed.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "rpc.rpc.client.RpcQueueTimeAvgTime",
+              "metric_path": "metrics/rpc/client/RpcQueueTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.client.RpcProcessingTimeAvgTime",
+              "metric_path": "metrics/rpc/client/RpcProcessingTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.datanode.RpcQueueTimeAvgTime",
+              "metric_path": "metrics/rpc/datanode/RpcQueueTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.datanode.RpcProcessingTimeAvgTime",
+              "metric_path": "metrics/rpc/datanode/RpcProcessingTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Client RPC Queue Wait time",
+              "value": "${rpc.rpc.client.RpcQueueTimeAvgTime}"
+            },
+            {
+              "name": "Client RPC Processing time",
+              "value": "${rpc.rpc.client.RpcProcessingTimeAvgTime}"
+            },
+            {
+              "name": "Datanode RPC Queue Wait time",
+              "value": "${rpc.rpc.datanode.RpcQueueTimeAvgTime}"
+            },
+            {
+              "name": "Datanode RPC Processing time",
+              "value": "${rpc.rpc.datanode.RpcProcessingTimeAvgTime}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "ms"
+          }
+        },
+        {
+          "widget_name": "NameNode Operations",
+          "description": "Rate per second of number of file operation over time.",
+          "widget_type": "GRAPH",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.namenode.TotalFileOps._rate",
+              "metric_path": "metrics/dfs/namenode/TotalFileOps._rate",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "NameNode File Operations",
+              "value": "${dfs.namenode.TotalFileOps._rate}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Failed disk volumes",
+          "description": "Number of Failed disk volumes across all DataNodes. Its indicative of HDFS bad health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum",
+              "metric_path": "metrics/dfs/datanode/NumFailedVolumes",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Failed disk volumes",
+              "value": "${FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes._sum}"
+            }
+          ],
+          "properties": {
+            "display_unit": ""
+          }
+        },
+        {
+          "widget_name": "Blocks With Corrupted Replicas",
+          "description": "Number represents data blocks with at least one corrupted replica (but not all of them). Its indicative of HDFS bad health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
+              "metric_path": "metrics/dfs/FSNamesystem/CorruptBlocks",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Blocks With Corrupted Replicas",
+              "value": "${Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0",
+            "error_threshold": "50"
+          }
+        },
+        {
+          "widget_name": "Under Replicated Blocks",
+          "description": "Number represents file blocks that does not meet the replication factor criteria. Its indicative of HDFS bad health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
+              "metric_path": "metrics/dfs/FSNamesystem/UnderReplicatedBlocks",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Under Replicated Blocks",
+              "value": "${Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0",
+            "error_threshold": "50"
+          }
+        },
+        {
+          "widget_name": "HDFS Space Utilization",
+          "description": "Percentage of available space used in the DFS.",
+          "widget_type": "GAUGE",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining",
+              "metric_path": "metrics/FSDatasetState/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl/Remaining",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
+              "metric_path": "metrics/dfs/datanode/Capacity",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Space Utilization",
+              "value": "${(FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity - FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining)/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0.75",
+            "error_threshold": "0.9"
+          }
+        }
+      ]
+    },
+    {
+      "layout_name": "default_hdfs_heatmap",
+      "section_name": "HDFS_HEATMAPS",
+      "display_name": "HDFS Heatmaps",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "HDFS Bytes Read",
+          "default_section_name": "HDFS_HEATMAPS",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesRead._rate",
+              "metric_path": "metrics/dfs/datanode/bytes_read._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Bytes Read",
+              "value": "${dfs.datanode.BytesRead._rate}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "1024"
+          }
+        },
+        {
+          "widget_name": "HDFS Bytes Written",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesWritten._rate",
+              "metric_path": "metrics/dfs/datanode/bytes_written._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Bytes Written",
+              "value": "${dfs.datanode.BytesWritten._rate}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "1024"
+          }
+        },
+        {
+          "widget_name": "DataNode Garbage Collection Time",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis",
+              "metric_path": "metrics/jvm/gcTimeMillis",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode Garbage Collection Time",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "max_limit": "10000"
+          }
+        },
+        {
+          "widget_name": "DataNode JVM Heap Memory Used",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode JVM Heap Memory Used",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "DataNode JVM Heap Memory Committed",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM",
+              "metric_path": "metrics/jvm/memHeapCommittedM",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode JVM Heap Memory Committed",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "DataNode Process Disk I/O Utilization",
+          "default_section_name": "HDFS_HEATMAPS",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesRead._rate",
+              "metric_path": "metrics/dfs/datanode/bytes_read._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.BytesWritten._rate",
+              "metric_path": "metrics/dfs/datanode/bytes_written._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.TotalReadTime._rate",
+              "metric_path": "metrics/dfs/datanode/TotalReadTime._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.TotalWriteTime._rate",
+              "metric_path": "metrics/dfs/datanode/TotalWriteTime._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode Process Disk I/O Utilization",
+              "value": "${((dfs.datanode.BytesRead._rate/dfs.datanode.TotalReadTime._rate)+(dfs.datanode.BytesWritten._rate/dfs.datanode.TotalWriteTime._rate))*50}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "DataNode Process Network I/O Utilization",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.RemoteBytesRead._rate",
+              "metric_path": "metrics/dfs/datanode/RemoteBytesRead._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.ReadsFromRemoteClient._rate",
+              "metric_path": "metrics/dfs/datanode/reads_from_remote_client._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.RemoteBytesWritten._rate",
+              "metric_path": "metrics/dfs/datanode/RemoteBytesWritten._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.WritesFromRemoteClient._rate",
+              "metric_path": "metrics/dfs/datanode/writes_from_remote_client._rate",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode Process Network I/O Utilization",
+              "value": "${((dfs.datanode.RemoteBytesRead._rate/dfs.datanode.ReadsFromRemoteClient._rate)+(dfs.datanode.RemoteBytesWritten._rate/dfs.datanode.WritesFromRemoteClient._rate))*50}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "HDFS Space Utilization",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining",
+              "metric_path": "metrics/FSDatasetState/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl/Remaining",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
+              "metric_path": "metrics/dfs/datanode/Capacity",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Space Utilization",
+              "value": "${((FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity - FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining)/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}
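
The "value" fields in these widgets are arithmetic expressions over the named
metrics. As a worked example of the CPU utilization and disk I/O formulas
above, with made-up sample metric values (a sketch only; Ambari evaluates
these expressions server-side against live metrics):

# Hypothetical sample metric values, for illustration only.
m = {'cpu_system': 5.0, 'cpu_user': 20.0, 'cpu_nice': 0.0,
     'cpu_idle': 70.0, 'cpu_wio': 5.0}

# CPU utilization as defined in the "NameNode Host Load" widget.
busy = m['cpu_system'] + m['cpu_user'] + m['cpu_nice']
total = busy + m['cpu_idle'] + m['cpu_wio']
print(busy / total * 100)  # 25.0 (%)

# The disk I/O widget sums two throughput ratios and multiplies by 50,
# which is the same as averaging the two ratios and scaling to percent.
read_ratio, write_ratio = 0.6, 0.8  # hypothetical bytes-per-time ratios
print((read_ratio + write_ratio) * 50)  # 70.0 (%)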

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a710b35/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/kerberos.json
new file mode 100644
index 0000000..9000e95
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/services/HDFS/kerberos.json
@@ -0,0 +1,246 @@
+{
+  "services": [
+    {
+      "name": "HDFS",
+      "identities": [
+        {
+          "name": "/spnego",
+          "principal": {
+            "configuration": "hdfs-site/dfs.web.authentication.kerberos.principal"
+          },
+          "keytab": {
+            "configuration": "hdfs-site/dfs.web.authentication.kerberos.keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "auth_to_local_properties" : [
+        "core-site/hadoop.security.auth_to_local"
+      ],
+      "configurations": [
+        {
+          "core-site": {
+            "hadoop.security.authentication": "kerberos",
+            "hadoop.security.authorization": "true",
+            "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}"
+          }
+        },
+        {
+          "ranger-hdfs-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name":  "HDFS_CLIENT",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        },
+        {
+          "name": "NAMENODE",
+          "identities": [
+            {
+              "name": "hdfs",
+              "principal": {
+                "value": "${hadoop-env/hdfs_user}-${cluster_name|toLower()}@${realm}",
+                "type" : "user" ,
+                "configuration": "hadoop-env/hdfs_principal_name",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hdfs.headless.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hadoop-env/hdfs_user_keytab"
+              }
+            },
+            {
+              "name": "namenode_nn",
+              "principal": {
+                "value": "nn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/dfs.namenode.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nn.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/dfs.namenode.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.namenode.kerberos.internal.spnego.principal"
+              }
+            },
+            {
+              "name": "/HDFS/NAMENODE/namenode_nn",
+              "principal": {
+                "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.principal"                
+              },
+              "keytab": {
+                "configuration": "ranger-hdfs-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "hdfs-site": {
+                "dfs.block.access.token.enable": "true"
+              }
+            }
+          ]
+        },
+        {
+          "name": "DATANODE",
+          "identities": [
+            {
+              "name": "datanode_dn",
+              "principal": {
+                "value": "dn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/dfs.datanode.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/dn.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/dfs.datanode.keytab.file"
+              }
+            }
+          ],
+          "configurations" : [
+            {
+              "hdfs-site" : {
+                "dfs.datanode.address" : "0.0.0.0:1019",
+                "dfs.datanode.http.address": "0.0.0.0:1022"
+              }
+            }
+          ]
+        },
+        {
+          "name": "SECONDARY_NAMENODE",
+          "identities": [
+            {
+              "name": "secondary_namenode_nn",
+              "principal": {
+                "value": "nn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nn.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/dfs.secondary.namenode.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.internal.spnego.principal"
+              }
+            }
+          ]
+        },
+        {
+          "name": "NFS_GATEWAY",
+          "identities": [
+            {
+              "name": "nfsgateway",
+              "principal": {
+                "value": "nfs/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/nfs.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nfs.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/nfs.keytab.file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "JOURNALNODE",
+          "identities": [
+            {
+              "name": "journalnode_jn",
+              "principal": {
+                "value": "jn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/dfs.journalnode.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jn.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/dfs.journalnode.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.journalnode.kerberos.internal.spnego.principal"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
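
(On reference resolution in the descriptor above: identity names beginning
with "/" are paths to identities defined elsewhere, so "/spnego" reuses the
stack-level SPNEGO identity, while "/HDFS/NAMENODE/namenode_nn" points the
ranger-hdfs-audit properties at the NameNode's own principal and keytab.)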


[17/41] ambari git commit: AMBARI-19091. HDP 3.0 TP - bootstrap the stack with metainfo, cluster-env, repos, and hooks (alejandro)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
new file mode 100644
index 0000000..d838211
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
@@ -0,0 +1,326 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+
+config = Script.get_config()
+
+# Whether to skip copying fast-hdfs-resource.jar to /var/lib/ambari-agent/lib/.
+# The jar is required when tarballs are copied to HDFS, so leave this False in that case.
+sysprep_skip_copy_fast_jar_hdfs = default("/configurations/cluster-env/sysprep_skip_copy_fast_jar_hdfs", False)
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+dfs_type = default("/commandParams/dfs_type", "")
+stack_root = Script.get_stack_root()
+hadoop_conf_dir = "/etc/hadoop/conf"
+component_list = default("/localComponents", [])
+
+hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
+
+hadoop_metrics2_properties_content = config['configurations']['hadoop-metrics2.properties']['content']
+
+# hadoop default params
+mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
+
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
+hadoop_bin = stack_select.get_hadoop_dir("sbin")
+hadoop_home = stack_select.get_hadoop_dir("home")
+create_lib_snappy_symlinks = False
+
+
+current_service = config['serviceName']
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#users and groups
+has_hadoop_env = 'hadoop-env' in config['configurations']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+#hosts
+hostname = config["hostname"]
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+
+has_namenode = len(namenode_host) > 0
+has_resourcemanager = len(rm_host) > 0
+has_slaves = len(slave_hosts) > 0
+has_oozie_server = len(oozie_servers) > 0
+has_hcat_server_host = len(hcat_server_hosts) > 0
+has_hive_server_host = len(hive_server_host) > 0
+has_hbase_masters = len(hbase_master_hosts) > 0
+has_zk_host = len(zk_hosts) > 0
+has_ganglia_server = len(ganglia_server_hosts) > 0
+has_metric_collector = len(ams_collector_hosts) > 0
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+metric_collector_port = None
+if has_metric_collector:
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+  else:
+    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
+    if metric_collector_web_address.find(':') != -1:
+      metric_collector_port = metric_collector_web_address.split(':')[1]
+    else:
+      metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path = default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type = default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password = default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+
+metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+
+# Cluster Zookeeper quorum
+zookeeper_quorum = None
+if has_zk_host:
+  if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
+    zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
+  else:
+    zookeeper_clientPort = '2181'
+  zookeeper_quorum = (':' + zookeeper_clientPort + ',').join(config['clusterHostInfo']['zookeeper_hosts'])
+  # last port config
+  zookeeper_quorum += ':' + zookeeper_clientPort
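+  # e.g. zookeeper_hosts ['c6401', 'c6402'] with clientPort '2181' yields
+  # "c6401:2181,c6402:2181" (hostnames illustrative)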
+
+#hadoop params
+
+if has_namenode or dfs_type == 'HCFS':
+  hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+  task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
+
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hbase_tmp_dir = "/tmp/hbase-hbase"
+#db params
+server_db_name = config['hostLevelParams']['db_name']
+db_driver_filename = config['hostLevelParams']['db_driver_filename']
+oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
+mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
+ambari_server_resources = config['hostLevelParams']['jdk_location']
+oracle_driver_symlink_url = format("{ambari_server_resources}oracle-jdbc-driver.jar")
+mysql_driver_symlink_url = format("{ambari_server_resources}mysql-jdbc-driver.jar")
+
+ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
+ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
+ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
+ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
+
+if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
+  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
+else:
+  rca_enabled = False
+rca_disabled_prefix = "###"
+if rca_enabled == True:
+  rca_prefix = ""
+else:
+  rca_prefix = rca_disabled_prefix
+
+#hadoop-env.sh
+java_home = config['hostLevelParams']['java_home']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+#log4j.properties
+
+yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
+
+dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
+
+#log4j.properties
+if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
+  log4j_props = config['configurations']['hdfs-log4j']['content']
+  if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])):
+    log4j_props += config['configurations']['yarn-log4j']['content']
+else:
+  log4j_props = None
+
+refresh_topology = False
+command_params = config["commandParams"] if "commandParams" in config else None
+if command_params is not None:
+  refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
+
+ambari_libs_dir = "/var/lib/ambari-agent/lib"
+is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+#host info
+all_hosts = default("/clusterHostInfo/all_hosts", [])
+all_racks = default("/clusterHostInfo/all_racks", [])
+all_ipv4_ips = default("/clusterHostInfo/all_ipv4_ips", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+
+#topology files
+net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
+net_topology_script_dir = os.path.dirname(net_topology_script_file_path)
+net_topology_mapping_data_file_name = 'topology_mappings.data'
+net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
+
+# Logic to create the /tmp and /user directories for the HCFS stack.
+has_core_site = 'core-site' in config['configurations']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+kinit_path_local = get_kinit_path()
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+smoke_hdfs_user_dir = format("/user/{smoke_user}")
+smoke_hdfs_user_mode = 0770
+
+
+##### Namenode RPC ports - metrics config section start #####
+
+# Figure out the rpc ports for current namenode
+nn_rpc_client_port = None
+nn_rpc_dn_port = None
+nn_rpc_healthcheck_port = None
+
+namenode_id = None
+namenode_rpc = None
+
+dfs_ha_enabled = False
+dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+if dfs_ha_nameservices is None:
+  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+
+dfs_ha_namemodes_ids_list = []
+other_namenode_id = None
+
+if dfs_ha_namenode_ids:
+  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namemodes_ids_list:
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    if hostname in nn_host:
+      namenode_id = nn_id
+      namenode_rpc = nn_host
+else:
+  namenode_rpc = default('/configurations/hdfs-site/dfs.namenode.rpc-address', None)
+
+if namenode_rpc:
+  nn_rpc_client_port = namenode_rpc.split(':')[1].strip()
+
+if dfs_ha_enabled:
+  dfs_service_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.servicerpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
+  dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
+else:
+  dfs_service_rpc_address = default('/configurations/hdfs-site/dfs.namenode.servicerpc-address', None)
+  dfs_lifeline_rpc_address = default('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address', None)
+
+if dfs_service_rpc_address:
+  nn_rpc_dn_port = dfs_service_rpc_address.split(':')[1].strip()
+
+if dfs_lifeline_rpc_address:
+  nn_rpc_healthcheck_port = dfs_lifeline_rpc_address.split(':')[1].strip()
+
+is_nn_client_port_configured = nn_rpc_client_port is not None
+is_nn_dn_port_configured = nn_rpc_dn_port is not None
+is_nn_healthcheck_port_configured = nn_rpc_healthcheck_port is not None
+
+##### end #####
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+)
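
For context on the functools.partial above: service code supplies only the
per-call arguments, while the cluster-wide defaults (user, keytab, hdfs-site,
conf dirs) are baked in once here. A minimal usage sketch, mirroring the
create_dirs() function later in this commit (the path is illustrative):

    import params

    # Queue a directory creation; nothing touches HDFS yet.
    params.HdfsResource("/tmp/example",
                        type="directory",
                        action="create_on_execute",
                        owner=params.hdfs_user,
                        mode=0777)

    # Flush all queued actions against HDFS in one pass.
    params.HdfsResource(None, action="execute")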

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py
new file mode 100644
index 0000000..548f051
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management.core.resources import File
+from resource_management.core.source import StaticFile, Template
+from resource_management.libraries.functions import format
+
+
+def create_topology_mapping():
+  import params
+
+  File(params.net_topology_mapping_data_file_path,
+       content=Template("topology_mappings.data.j2"),
+       owner=params.hdfs_user,
+       group=params.user_group,
+       only_if=format("test -d {net_topology_script_dir}"))
+
+def create_topology_script():
+  import params
+
+  File(params.net_topology_script_file_path,
+       content=StaticFile('topology_script.py'),
+       mode=0755,
+       only_if=format("test -d {net_topology_script_dir}"))
+
+def create_topology_script_and_mapping():
+  import params
+  if params.has_hadoop_env:
+    create_topology_mapping()
+    create_topology_script()

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
new file mode 100644
index 0000000..5dce8e0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
@@ -0,0 +1,191 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
+
+from resource_management import *
+
+def setup_hadoop():
+  """
+  Setup hadoop files and directories
+  """
+  import params
+
+  Execute(("setenforce","0"),
+          only_if="test -f /selinux/enforce",
+          not_if="(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)",
+          sudo=True,
+  )
+
+  #directories
+  if params.has_namenode or params.dfs_type == 'HCFS':
+    Directory(params.hdfs_log_dir_prefix,
+              create_parents = True,
+              owner='root',
+              group=params.user_group,
+              mode=0775,
+              cd_access='a',
+    )
+    if params.has_namenode:
+      Directory(params.hadoop_pid_dir_prefix,
+              create_parents = True,
+              owner='root',
+              group='root',
+              cd_access='a',
+      )
+    Directory(params.hadoop_tmp_dir,
+              create_parents = True,
+              owner=params.hdfs_user,
+              cd_access='a',
+              )
+  #files
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+      
+    # if WebHDFS is not enabled we need this jar to create hadoop folders and copy tarballs to HDFS.
+    if params.sysprep_skip_copy_fast_jar_hdfs:
+      print "Skipping copying of fast-hdfs-resource.jar as host is sys prepped"
+    elif params.dfs_type == 'HCFS' or not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
+      # for source-code of jar goto contrib/fast-hdfs-resource
+      File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
+           mode=0644,
+           content=StaticFile("fast-hdfs-resource.jar")
+      )
+      
+    if os.path.exists(params.hadoop_conf_dir):
+      File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
+           owner=tc_owner,
+           content=Template('commons-logging.properties.j2')
+      )
+
+      health_check_template_name = "health_check"
+      File(os.path.join(params.hadoop_conf_dir, health_check_template_name),
+           owner=tc_owner,
+           content=Template(health_check_template_name + ".j2")
+      )
+
+      log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
+      if (params.log4j_props != None):
+        File(log4j_filename,
+             mode=0644,
+             group=params.user_group,
+             owner=params.hdfs_user,
+             content=params.log4j_props
+        )
+      elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
+        File(log4j_filename,
+             mode=0644,
+             group=params.user_group,
+             owner=params.hdfs_user,
+        )
+
+      File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
+           owner=params.hdfs_user,
+           group=params.user_group,
+           content=InlineTemplate(params.hadoop_metrics2_properties_content)
+      )
+
+    if params.dfs_type == 'HCFS' and params.has_core_site and 'ECS_CLIENT' in params.component_list:
+      create_dirs()
+
+    create_microsoft_r_dir()
+
+
+def setup_configs():
+  """
+  Creates configs for services HDFS mapred
+  """
+  import params
+
+  if params.has_namenode or params.dfs_type == 'HCFS':
+    if os.path.exists(params.hadoop_conf_dir):
+      File(params.task_log4j_properties_location,
+           content=StaticFile("task-log4j.properties"),
+           mode=0755
+      )
+
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
+      File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
+           owner=params.hdfs_user,
+           group=params.user_group
+      )
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
+      File(os.path.join(params.hadoop_conf_dir, 'masters'),
+                owner=params.hdfs_user,
+                group=params.user_group
+      )
+
+  generate_include_file()
+
+
+def generate_include_file():
+  import params
+
+  if params.has_namenode and params.dfs_hosts and params.has_slaves:
+    include_hosts_list = params.slave_hosts
+    File(params.dfs_hosts,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+
+def create_javahome_symlink():
+  if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"):
+    Directory("/usr/jdk64/",
+         create_parents = True,
+    )
+    Link("/usr/jdk/jdk1.6.0_31",
+         to="/usr/jdk64/jdk1.6.0_31",
+    )
+
+def create_dirs():
+  import params
+  params.HdfsResource(params.hdfs_tmp_dir,
+                      type="directory",
+                      action="create_on_execute",
+                      owner=params.hdfs_user,
+                      mode=0777
+  )
+  params.HdfsResource(params.smoke_hdfs_user_dir,
+                      type="directory",
+                      action="create_on_execute",
+                      owner=params.smoke_user,
+                      mode=params.smoke_hdfs_user_mode
+  )
+  params.HdfsResource(None,
+                      action="execute"
+  )
+
+def create_microsoft_r_dir():
+  import params
+  if 'MICROSOFT_R_NODE_CLIENT' in params.component_list and params.default_fs:
+    directory = '/user/RevoShare'
+    try:
+      params.HdfsResource(directory,
+                          type="directory",
+                          action="create_on_execute",
+                          owner=params.hdfs_user,
+                          mode=0777)
+      params.HdfsResource(None, action="execute")
+    except Exception as exception:
+      Logger.warning("Could not check the existence of {0} on DFS while starting {1}, exception: {2}".format(directory, params.current_service, str(exception)))
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2
new file mode 100644
index 0000000..2197ba5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2
@@ -0,0 +1,43 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#Logging Implementation
+
+#Log4J
+org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
+
+#JDK Logger
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..1adba80
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}
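
Rendered, the template above emits one excluded host per line; with
hdfs_exclude_file = ['host1.example.com', 'host2.example.com'] (hypothetical
values), the generated file is simply:

    host1.example.com
    host2.example.com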

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
new file mode 100644
index 0000000..2f3aab6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -0,0 +1,105 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+{% if has_ganglia_server %}
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
+datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
+jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
+tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
+maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
+reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
+resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
+nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
+historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
+journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
+nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649
+supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
+
+resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
+
+{% endif %}
+
+{% if has_metric_collector %}
+
+*.period={{metrics_collection_period}}
+*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+*.sink.timeline.period={{metrics_collection_period}}
+*.sink.timeline.sendInterval={{metrics_report_interval}}000
+*.sink.timeline.slave.host.name={{hostname}}
+*.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
+*.sink.timeline.protocol={{metric_collector_protocol}}
+*.sink.timeline.port={{metric_collector_port}}
+
+# HTTPS properties
+*.sink.timeline.truststore.path = {{metric_truststore_path}}
+*.sink.timeline.truststore.type = {{metric_truststore_type}}
+*.sink.timeline.truststore.password = {{metric_truststore_password}}
+
+datanode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+namenode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+resourcemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
+nodemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
+jobhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
+journalnode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+applicationhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
+
+resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
+
+{% if is_nn_client_port_configured %}
+# Namenode rpc ports customization
+namenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}}
+{% endif %}
+{% if is_nn_dn_port_configured %}
+namenode.sink.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}}
+{% endif %}
+{% if is_nn_healthcheck_port_configured %}
+namenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}
+{% endif %}
+
+{% endif %}
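
With the defaults set in params.py earlier in this commit
(metrics_collection_period = 10, metrics_report_interval = 60), the timeline
sink lines above render as:

    *.period=10
    *.sink.timeline.period=10
    *.sink.timeline.sendInterval=60000

i.e. the report interval is turned into milliseconds by the literal "000"
suffix in the template.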

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2
new file mode 100644
index 0000000..0a03d17
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2
@@ -0,0 +1,81 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+# Run all checks
+for check in disks ; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0
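
A worked example of the script above: when every ext3 mount listed in
/etc/fstab is mounted, the backtick subshell captures "disks ok" and the
script prints "OK: disks ok,". An unmounted entry such as /grid/0
(hypothetical) is reported as "ERROR  /grid/0(u),". Note that the final
"exit 0" runs in both cases; the "exit 2" inside check_disks only terminates
the subshell, so callers are expected to parse the OK/ERROR message rather
than the exit status.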

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2
new file mode 100644
index 0000000..4a9e713
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2
new file mode 100644
index 0000000..15034d6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2
@@ -0,0 +1,24 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+[network_topology]
+{% for host in all_hosts %}
+{% if host in slave_hosts %}
+{{host}}={{all_racks[loop.index-1]}}
+{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}
+{% endif %}
+{% endfor %}
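
As a worked example of the loop above: with all_hosts = ['c6401', 'c6402'],
slave_hosts = ['c6402'], and all_ipv4_ips / all_racks aligned by index
(hypothetical values), only the slave's two entries are emitted:

    [network_topology]
    c6402=/default-rack
    192.168.64.102=/default-rack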

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json
new file mode 100644
index 0000000..9579d0f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json
@@ -0,0 +1,78 @@
+{
+  "properties": {
+    "realm": "${kerberos-env/realm}",
+    "keytab_dir": "/etc/security/keytabs",
+    "additional_realms": ""
+  },
+  "identities": [
+    {
+      "name": "spnego",
+      "principal": {
+        "value": "HTTP/_HOST@${realm}",
+        "type": "service"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/spnego.service.keytab",
+        "owner": {
+          "name": "root",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        }
+      }
+    },
+    {
+      "name": "smokeuser",
+      "principal": {
+        "value": "${cluster-env/smokeuser}-${cluster_name|toLower()}@${realm}",
+        "type": "user",
+        "configuration": "cluster-env/smokeuser_principal_name",
+        "local_username": "${cluster-env/smokeuser}"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/smokeuser.headless.keytab",
+        "owner": {
+          "name": "${cluster-env/smokeuser}",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        },
+        "configuration": "cluster-env/smokeuser_keytab"
+      }
+    }
+  ],
+  "services": [
+    {
+      "name": "AMBARI",
+      "components": [
+        {
+          "name": "AMBARI_SERVER",
+          "identities": [
+            {
+              "name": "ambari-server",
+              "principal": {
+                "value": "ambari-server-${cluster_name|toLower()}@${realm}",
+                "type": "user",
+                "configuration": "cluster-env/ambari_principal_name"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/ambari.server.keytab",
+                "owner": {
+                  "access": "r"
+                }
+              }
+            },
+            {
+              "name" : "ambari-server_spnego",
+              "reference" : "/spnego"
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
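
As a sketch of how the ${...} references above resolve, assuming a cluster
named MyCluster, realm EXAMPLE.COM, and the stock smokeuser ambari-qa (all
hypothetical values): the smokeuser principal
${cluster-env/smokeuser}-${cluster_name|toLower()}@${realm} becomes
ambari-qa-mycluster@EXAMPLE.COM, and its keytab lands at
/etc/security/keytabs/smokeuser.headless.keytab, readable ("r") by both the
owner and the ${cluster-env/user_group} group.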

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml
new file mode 100644
index 0000000..0364d41
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <versions>
+    <active>true</active>
+  </versions>
+  <minJdk>1.7</minJdk>
+  <maxJdk>1.8</maxJdk>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
new file mode 100644
index 0000000..dd87b72
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
@@ -0,0 +1,323 @@
+{
+  "stack_features": [
+    {
+      "name": "snappy",
+      "description": "Snappy compressor/decompressor support",
+      "min_version": "2.0.0.0",
+      "max_version": "2.2.0.0"
+    },
+    {
+      "name": "lzo",
+      "description": "LZO libraries support",
+      "min_version": "2.2.1.0"
+    },
+    {
+      "name": "express_upgrade",
+      "description": "Express upgrade support",
+      "min_version": "2.1.0.0"
+    },
+    {
+      "name": "rolling_upgrade",
+      "description": "Rolling upgrade support",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "config_versioning",
+      "description": "Configurable versions support",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "datanode_non_root",
+      "description": "DataNode running as non-root support (AMBARI-7615)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "remove_ranger_hdfs_plugin_env",
+      "description": "HDFS removes Ranger env files (AMBARI-14299)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "ranger",
+      "description": "Ranger Service support",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "ranger_tagsync_component",
+      "description": "Ranger Tagsync component support (AMBARI-14383)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "phoenix",
+      "description": "Phoenix Service support",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "nfs",
+      "description": "NFS support",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "tez_for_spark",
+      "description": "Tez dependency for Spark",
+      "min_version": "2.2.0.0",
+      "max_version": "2.3.0.0"
+    },
+    {
+      "name": "timeline_state_store",
+      "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "copy_tarball_to_hdfs",
+      "description": "Copy tarball to HDFS support (AMBARI-12113)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "spark_16plus",
+      "description": "Spark 1.6+",
+      "min_version": "2.4.0.0"
+    },
+    {
+      "name": "spark_thriftserver",
+      "description": "Spark Thrift Server",
+      "min_version": "2.3.2.0"
+    },
+    {
+      "name": "storm_kerberos",
+      "description": "Storm Kerberos support (AMBARI-7570)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "storm_ams",
+      "description": "Storm AMS integration (AMBARI-10710)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "create_kafka_broker_id",
+      "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
+      "min_version": "2.2.0.0",
+      "max_version": "2.3.0.0"
+    },
+    {
+      "name": "kafka_listeners",
+      "description": "Kafka listeners (AMBARI-10984)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "kafka_kerberos",
+      "description": "Kafka Kerberos support (AMBARI-10984)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "pig_on_tez",
+      "description": "Pig on Tez support (AMBARI-7863)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "ranger_usersync_non_root",
+      "description": "Ranger Usersync as non-root user (AMBARI-10416)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "ranger_audit_db_support",
+      "description": "Ranger Audit to DB support",
+      "min_version": "2.2.0.0",
+      "max_version": "2.5.0.0"
+    },
+    {
+      "name": "accumulo_kerberos_user_auth",
+      "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "knox_versioned_data_dir",
+      "description": "Use versioned data dir for Knox (AMBARI-13164)",
+      "min_version": "2.3.2.0"
+    },
+    {
+      "name": "knox_sso_topology",
+      "description": "Knox SSO Topology support (AMBARI-13975)",
+      "min_version": "2.3.8.0"
+    },
+    {
+      "name": "atlas_rolling_upgrade",
+      "description": "Rolling upgrade support for Atlas",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "oozie_admin_user",
+      "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "oozie_create_hive_tez_configs",
+      "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "oozie_setup_shared_lib",
+      "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "oozie_host_kerberos",
+      "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
+      "min_version": "2.0.0.0",
+      "max_version": "2.2.0.0"
+    },
+    {
+      "name": "falcon_extensions",
+      "description": "Falcon Extension",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "hive_metastore_upgrade_schema",
+      "description": "Hive metastore upgrade schema support (AMBARI-11176)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "hive_server_interactive",
+      "description": "Hive server interactive support (AMBARI-15573)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "hive_webhcat_specific_configs",
+      "description": "Hive webhcat specific configurations support (AMBARI-12364)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "hive_purge_table",
+      "description": "Hive purge table support (AMBARI-12260)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "hive_server2_kerberized_env",
+      "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
+      "min_version": "2.2.3.0",
+      "max_version": "2.2.5.0"
+    },
+    {
+      "name": "hive_env_heapsize",
+      "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "ranger_kms_hsm_support",
+      "description": "Ranger KMS HSM support (AMBARI-15752)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_log4j_support",
+      "description": "Ranger supporting log-4j properties (AMBARI-15681)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_kerberos_support",
+      "description": "Ranger Kerberos support",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "hive_metastore_site_support",
+      "description": "Hive Metastore site support",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_usersync_password_jceks",
+      "description": "Saving Ranger Usersync credentials in jceks",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_install_infra_client",
+      "description": "Ambari Infra Service support",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "falcon_atlas_support_2_3",
+      "description": "Falcon Atlas integration support for 2.3 stack",
+      "min_version": "2.3.99.0",
+      "max_version": "2.4.0.0"
+    },
+    {
+      "name": "falcon_atlas_support",
+      "description": "Falcon Atlas integration",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "hbase_home_directory",
+      "description": "Hbase home directory in HDFS needed for HBASE backup",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "spark_livy",
+      "description": "Livy as slave component of spark",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "atlas_ranger_plugin_support",
+      "description": "Atlas Ranger plugin support",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "atlas_conf_dir_in_path",
+      "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
+      "min_version": "2.3.0.0",
+      "max_version": "2.4.99.99"
+    },
+    {
+      "name": "atlas_upgrade_support",
+      "description": "Atlas supports express and rolling upgrades",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "atlas_hook_support",
+      "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_pid_support",
+      "description": "Ranger Service support pid generation AMBARI-16756",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_kms_pid_support",
+      "description": "Ranger KMS Service support pid generation",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_admin_password_change",
+      "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "storm_metrics_apache_classes",
+      "description": "Metrics sink for Storm that uses Apache class names",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "spark_java_opts_support",
+      "description": "Allow Spark to generate java-opts file",
+      "min_version": "2.2.0.0",
+      "max_version": "2.4.0.0"
+    },
+    {
+      "name": "atlas_hbase_setup",
+      "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_hive_plugin_jdbc_url",
+      "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "zkfc_version_advertised",
+      "description": "ZKFC advertise version",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "phoenix_core_hdfs_site_required",
+      "description": "HDFS and CORE site required for Phoenix",
+      "max_version": "2.5.9.9"
+    }
+  ]
+}

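To make the gating above concrete, here is a minimal Python sketch of how a stack_features entry could be checked against a running stack version. The component-wise parsing and the inclusive-min / exclusive-max semantics are assumptions for illustration, not Ambari's actual version-comparison code.

def _parse(version):
    return tuple(int(part) for part in version.split("."))

def is_feature_supported(feature, stack_version):
    current = _parse(stack_version)
    if "min_version" in feature and current < _parse(feature["min_version"]):
        return False
    if "max_version" in feature and current >= _parse(feature["max_version"]):
        return False
    return True

# e.g. ranger_kerberos_support from the list above
feature = {"name": "ranger_kerberos_support", "min_version": "2.5.0.0"}
print(is_feature_supported(feature, "2.5.1.0"))  # True
print(is_feature_supported(feature, "2.4.3.0"))  # False
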
http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json
new file mode 100644
index 0000000..d1aab4b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_tools.json
@@ -0,0 +1,4 @@
+{
+  "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
+  "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
+}
\ No newline at end of file

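The stack_tools.json triples appear to follow a [tool_name, tool_path, package_name] layout; that reading is inferred from the values above, not from documentation. A small, hypothetical consumer:

import json

raw = '''{"stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
          "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]}'''
tools = json.loads(raw)
tool_name, tool_path, package_name = tools["stack_selector"]
print(tool_name, tool_path, package_name)
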
http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/repos/repoinfo.xml
new file mode 100644
index 0000000..5145064
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/repos/repoinfo.xml
@@ -0,0 +1,132 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <latest>http://s3.amazonaws.com/dev.hortonworks.com/HDP/hdp_urlinfo.json</latest>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/3.x/updates/3.0.0.0</baseurl>
+      <repoid>HDP-3.0</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="redhat7">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos7/3.x/updates/3.0.0.0</baseurl>
+      <repoid>HDP-3.0</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos7</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="suse11">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/suse11sp3/3.x/updates/3.0.0.0</baseurl>
+      <repoid>HDP-3.0</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/suse11sp3</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="suse12">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/sles12/3.x/updates/3.0.0.0</baseurl>
+      <repoid>HDP-3.0</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/suse11sp3</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="ubuntu12">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/ubuntu12/3.x/updates/3.0.0.0</baseurl>
+      <repoid>HDP-3.0</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/ubuntu12</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="debian7">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/debian7/3.x/updates/3.0.0.0</baseurl>
+      <repoid>HDP-3.0</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/debian6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="ubuntu14">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/ubuntu14/3.x/updates/3.0.0.0</baseurl>
+      <repoid>HDP-3.0</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/ubuntu12</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+  <os family="ubuntu16">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/ubuntu14/3.x/updates/3.0.0.0</baseurl>
+      <repoid>HDP-3.0</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/ubuntu12</baseurl>
+      <repoid>HDP-UTILS-1.1.0.21</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+    </repo>
+  </os>
+</reposinfo>

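For readers who want to sanity-check a repoinfo.xml like the one above (for example, to catch stale base URLs copied from an older stack), a standard-library sketch that lists every OS family with its repo ids and URLs; the file path is an assumption for the example:

import xml.etree.ElementTree as ET

tree = ET.parse("repoinfo.xml")  # assumed local copy of the file above
for os_node in tree.getroot().findall("os"):
    family = os_node.get("family")
    for repo in os_node.findall("repo"):
        print(family, repo.findtext("repoid"), repo.findtext("baseurl"))
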
http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json b/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
new file mode 100644
index 0000000..3176354
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
@@ -0,0 +1,95 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_system_heatmap",
+      "display_name": "Heatmaps",
+      "section_name": "SYSTEM_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Host Disk Space Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "disk_free",
+              "metric_path": "metrics/disk/disk_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "disk_total",
+              "metric_path": "metrics/disk/disk_total",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Disk Space Used %",
+              "value": "${((disk_total-disk_free)/disk_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host Memory Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_cached",
+              "metric_path": "metrics/memory/mem_cached",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host CPU Wait IO %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host CPU Wait IO %",
+              "value": "${cpu_wio*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}

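Each heatmap widget above pairs fetched metrics with a ${...} value template. A hedged sketch of how such a template could be evaluated once the metric values are in hand; the restricted eval is illustrative only and is not Ambari's widget engine:

def evaluate_widget_value(template, metrics):
    expression = template.strip()
    if expression.startswith("${") and expression.endswith("}"):
        expression = expression[2:-1]
    # expose only the metric names to the arithmetic expression
    return eval(expression, {"__builtins__": {}}, dict(metrics))

print(evaluate_widget_value("${((disk_total-disk_free)/disk_total)*100}",
                            {"disk_total": 500.0, "disk_free": 125.0}))  # 75.0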

[33/41] ambari git commit: AMBARI-19111: Ambari server upgrade log printout refactoring (dili)

Posted by jo...@apache.org.
AMBARI-19111: Ambari server upgrade log printout refactoring (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/38827c07
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/38827c07
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/38827c07

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 38827c0706f3463b912251908f786d3b67096ce1
Parents: 9e1318e
Author: Di Li <di...@apache.org>
Authored: Wed Dec 7 11:22:05 2016 -0500
Committer: Di Li <di...@apache.org>
Committed: Wed Dec 7 11:22:05 2016 -0500

----------------------------------------------------------------------
 .../main/python/ambari_commons/logging_utils.py |  5 ++-
 .../main/python/ambari_server/serverUpgrade.py  | 38 ++++++++++----------
 2 files changed, 23 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/38827c07/ambari-common/src/main/python/ambari_commons/logging_utils.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/ambari_commons/logging_utils.py b/ambari-common/src/main/python/ambari_commons/logging_utils.py
index 4ebe696..6535ebc 100644
--- a/ambari-common/src/main/python/ambari_commons/logging_utils.py
+++ b/ambari-common/src/main/python/ambari_commons/logging_utils.py
@@ -67,7 +67,10 @@ def set_debug_mode_from_options(options):
 #
 # Prints an "info" message.
 #
-def print_info_msg(msg):
+def print_info_msg(msg, forced=False):
+  if forced:
+    print("INFO: " + msg)
+    return
   if _VERBOSE:
     print("INFO: " + msg)
 

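The patched helper keeps two print branches; behaviourally it reduces to "print when forced or verbose". A condensed, standalone restatement (2-space indent to match the module style):

_VERBOSE = False

def print_info_msg(msg, forced=False):
  if forced or _VERBOSE:
    print("INFO: " + msg)

print_info_msg("shown regardless of verbosity", forced=True)
print_info_msg("hidden unless _VERBOSE is set")
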
http://git-wip-us.apache.org/repos/asf/ambari/blob/38827c07/ambari-server/src/main/python/ambari_server/serverUpgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/serverUpgrade.py b/ambari-server/src/main/python/ambari_server/serverUpgrade.py
index 7d7a9ab..6f17900 100644
--- a/ambari-server/src/main/python/ambari_server/serverUpgrade.py
+++ b/ambari-server/src/main/python/ambari_server/serverUpgrade.py
@@ -163,7 +163,7 @@ def run_stack_upgrade(args, stackName, stackVersion, repo_url, repo_url_os):
                                             "updateStackId",
                                             "'" + json.dumps(stackId) + "'")
   (retcode, stdout, stderr) = run_os_command(command)
-  print_info_msg("Return code from stack upgrade command, retcode = " + str(retcode))
+  print_info_msg("Return code from stack upgrade command, retcode = {0}".format(str(retcode)))
   if retcode > 0:
     print_error_msg("Error executing stack upgrade, please check the server logs.")
   return retcode
@@ -182,10 +182,9 @@ def run_metainfo_upgrade(args, keyValueMap=None):
                                               'updateMetaInfo',
                                               "'" + json.dumps(keyValueMap) + "'")
     (retcode, stdout, stderr) = run_os_command(command)
-    print_info_msg("Return code from stack upgrade command, retcode = " + str(retcode))
+    print_info_msg("Return code from stack upgrade command, retcode = {0}".format(str(retcode)))
     if retcode > 0:
-      print_error_msg("Error executing metainfo upgrade, please check the "
-                      "server logs.")
+      print_error_msg("Error executing metainfo upgrade, please check the server logs.")
 
   return retcode
 
@@ -195,7 +194,7 @@ def run_metainfo_upgrade(args, keyValueMap=None):
 #
 
 def change_objects_owner(args):
-  print 'Fixing database objects owner'
+  print_info_msg('Fixing database objects owner', True)
 
   properties = Properties()   #Dummy, args contains the dbms name and parameters already
 
@@ -228,8 +227,8 @@ def upgrade_local_repo(args):
 
     repo_file = os.path.join(stack_root, stack_version_local, "repos", "repoinfo.xml")
 
-    print_info_msg("Local repo file: " + repo_file_local)
-    print_info_msg("Repo file: " + repo_file_local)
+    print_info_msg("Local repo file: {0}".format(repo_file_local))
+    print_info_msg("Repo file: {0}".format(repo_file))
 
     metainfo_update_items = {}
 
@@ -267,7 +266,7 @@ def run_schema_upgrade(args):
 
   ensure_jdbc_driver_is_installed(args, get_ambari_properties())
 
-  print 'Upgrading database schema'
+  print_info_msg('Upgrading database schema', True)
 
   serverClassPath = ServerClassPath(get_ambari_properties(), args)
   class_path = serverClassPath.get_full_ambari_classpath_escaped_for_shell(validate_classpath=True)
@@ -284,19 +283,19 @@ def run_schema_upgrade(args):
   environ = generate_env(args, ambari_user, current_user)
 
   (retcode, stdout, stderr) = run_os_command(command, env=environ)
-  print_info_msg("Return code from schema upgrade command, retcode = " + str(retcode))
+  print_info_msg("Return code from schema upgrade command, retcode = {0}".format(str(retcode)), True)
   if stdout:
-    print "Console output from schema upgrade command:"
-    print stdout
-    print
-  if stderr:
-    print "Error output from schema upgrade command:"
-    print stderr
+    print_info_msg("Console output from schema upgrade command:", True)
+    print_info_msg(stdout, True)
     print
   if retcode > 0:
     print_error_msg("Error executing schema upgrade, please check the server logs.")
+    if stderr:
+      print_error_msg("Error output from schema upgrade command:")
+      print_error_msg(stderr)
+      print
   else:
-    print_info_msg('Schema upgrade completed')
+    print_info_msg('Schema upgrade completed', True)
   return retcode
 
 
@@ -335,16 +334,17 @@ def move_user_custom_actions():
     raise FatalException(1, err)
 
 def upgrade(args):
-  logger.info("Upgrade ambari-server.")
+  print_info_msg("Upgrade Ambari Server", True)
   if not is_root():
     err = configDefaults.MESSAGE_ERROR_UPGRADE_NOT_ROOT
     raise FatalException(4, err)
-  print 'Updating properties in ' + AMBARI_PROPERTIES_FILE + ' ...'
+  print_info_msg('Updating Ambari Server properties in {0} ...'.format(AMBARI_PROPERTIES_FILE), True)
   retcode = update_ambari_properties()
   if not retcode == 0:
     err = AMBARI_PROPERTIES_FILE + ' file can\'t be updated. Exiting'
     raise FatalException(retcode, err)
 
+  print_info_msg('Updating Ambari Server properties in {0} ...'.format(AMBARI_ENV_FILE), True)
   retcode = update_ambari_env()
   if not retcode == 0:
     err = AMBARI_ENV_FILE + ' file can\'t be updated. Exiting'
@@ -354,7 +354,7 @@ def upgrade(args):
   if retcode == -2:
     pass  # no changes done, let's be silent
   elif retcode == 0:
-    print 'File ' + AMBARI_KRB_JAAS_LOGIN_FILE + ' updated.'
+    print_info_msg("File {0} updated.".format(AMBARI_KRB_JAAS_LOGIN_FILE), True)
   elif not retcode == 0:
     err = AMBARI_KRB_JAAS_LOGIN_FILE + ' file can\'t be updated. Exiting'
     raise FatalException(retcode, err)

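The reworked schema-upgrade reporting above always echoes stdout but surfaces stderr only on a non-zero return code. A self-contained reduction with stand-in print helpers (the sample values are invented, not real upgrade output):

def print_info_msg(msg, forced=False):
  if forced:
    print("INFO: " + msg)

def print_error_msg(msg):
  print("ERROR: " + msg)

def report_schema_upgrade(retcode, stdout, stderr):
  print_info_msg("Return code from schema upgrade command, retcode = {0}".format(retcode), True)
  if stdout:
    print_info_msg("Console output from schema upgrade command:", True)
    print_info_msg(stdout, True)
  if retcode > 0:
    print_error_msg("Error executing schema upgrade, please check the server logs.")
    if stderr:
      print_error_msg("Error output from schema upgrade command:")
      print_error_msg(stderr)
  else:
    print_info_msg("Schema upgrade completed", True)

report_schema_upgrade(1, "applied DDL", "constraint violation")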

[11/41] ambari git commit: AMBARI-19110. "Cluster Load" widget in ambari-dashboard, download as CSV is not working (alexantonenko)

Posted by jo...@apache.org.
AMBARI-19110. "Cluster Load" widget in ambari-dashboard, download as CSV is not working (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0f297510
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0f297510
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0f297510

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 0f297510640f24db5c11bfbabaa390bb29e619a6
Parents: 2cd12ce
Author: Alex Antonenko <hi...@gmail.com>
Authored: Tue Dec 6 18:29:00 2016 +0200
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Tue Dec 6 19:04:30 2016 +0200

----------------------------------------------------------------------
 .../common/widgets/export_metrics_mixin.js      | 28 ++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0f297510/ambari-web/app/mixins/common/widgets/export_metrics_mixin.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/widgets/export_metrics_mixin.js b/ambari-web/app/mixins/common/widgets/export_metrics_mixin.js
index 7517efd..aa4f77c 100644
--- a/ambari-web/app/mixins/common/widgets/export_metrics_mixin.js
+++ b/ambari-web/app/mixins/common/widgets/export_metrics_mixin.js
@@ -85,6 +85,7 @@ App.ExportMetricsMixin = Em.Mixin.create({
       ticksNumber,
       metricsNumber,
       metricsArray;
+    this.checkGraphDataForValidity(data);
     titles = data.map(function (item) {
       return displayUnit ? item.name + ' (' + displayUnit + ')' : item.name;
     }, this);
@@ -95,12 +96,35 @@ App.ExportMetricsMixin = Em.Mixin.create({
     for (var i = 0; i < ticksNumber; i++) {
       metricsArray.push([data[0].data[i][1]]);
       for (var j = 0; j < metricsNumber; j++) {
-        metricsArray[i + 1].push(data[j].data[i][0]);
+         metricsArray[i + 1].push(data[j].data[i][0]);
       };
-    }
+    };
     return stringUtils.arrayToCSV(metricsArray);
   },
 
+  checkGraphDataForValidity: function (data) {
+    data.sort(function (a, b) {
+      return b.data.length - a.data.length
+    });
+
+    var maxLength = data[0].data.length;
+
+    for (var i = 1; i < data.length; i ++) {
+      if (data[i].data.length !== maxLength) this.fillGraphDataArrayWithMockedData(data[i], maxLength);
+    }
+  },
+
+  fillGraphDataArrayWithMockedData: function (dataArray, neededLength) {
+    var startIndex = dataArray.data.length,
+      timestampInterval = dataArray.data[2][1] - dataArray.data[1][1];
+
+    for (var i = startIndex; i < neededLength; i++) {
+      var previousTimestamp = dataArray.data[i - 1][1];
+
+      dataArray.data.push([null, previousTimestamp + timestampInterval]);
+    }
+  },
+
   jsonReplacer: function () {
     var displayUnit = this.get('targetView.displayUnit');
     return function (key, value) {

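The CSV fix above pads shorter metric series with null points at extrapolated timestamps so every series contributes one value per row. A language-neutral restatement in Python; unlike the JavaScript (which derives the interval from points 1 and 2), this sketch uses the first two points and assumes a regular sampling interval:

def pad_series(series, needed_length):
    # series is a list of [value, timestamp] pairs, as in the widget data
    interval = series[1][1] - series[0][1]
    while len(series) < needed_length:
        series.append([None, series[-1][1] + interval])
    return series

def normalize(all_series):
    longest = max(len(s) for s in all_series)
    return [pad_series(s, longest) for s in all_series]

print(normalize([[[1, 0], [2, 10], [3, 20]],
                 [[5, 0], [6, 10]]]))  # second series gains [None, 20]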

[37/41] ambari git commit: AMBARI-19066. Add more logging around status command report processing on server side (magyari_sandor)

Posted by jo...@apache.org.
AMBARI-19066. Add more logging around status command report processing on server side (magyari_sandor)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/99afb8bf
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/99afb8bf
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/99afb8bf

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 99afb8bf67c8278d78f960ab2017a6e451cb58ae
Parents: 9fc7f0a
Author: Sandor Magyari <sm...@hortonworks.com>
Authored: Wed Dec 7 14:57:16 2016 +0100
Committer: Sandor Magyari <sm...@hortonworks.com>
Committed: Wed Dec 7 18:19:43 2016 +0100

----------------------------------------------------------------------
 .../apache/ambari/server/agent/AgentRequests.java    |  2 +-
 .../ambari/server/agent/HeartbeatProcessor.java      | 15 ++++++++++-----
 2 files changed, 11 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/99afb8bf/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java
index 2980f38..01195bf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/AgentRequests.java
@@ -45,9 +45,9 @@ public class AgentRequests {
 
   public void setExecutionDetailsRequest(String host, String component, String requestExecutionCmd) {
     if (StringUtils.isNotBlank(requestExecutionCmd)) {
-      LOG.debug("Setting need for exec command to " + requestExecutionCmd + " for " + component);
       Map<String, Boolean> perHostRequiresExecCmdDetails = getPerHostRequiresExecCmdDetails(host);
       if (Boolean.TRUE.toString().toUpperCase().equals(requestExecutionCmd.toUpperCase())) {
+        LOG.info("Setting need for exec command to " + requestExecutionCmd + " for " + component);
         perHostRequiresExecCmdDetails.put(component, Boolean.TRUE);
       } else {
         perHostRequiresExecCmdDetails.put(component, Boolean.FALSE);

http://git-wip-us.apache.org/repos/asf/ambari/blob/99afb8bf/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
index 8f4782e..404419c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
@@ -19,8 +19,6 @@ package org.apache.ambari.server.agent;
 
 
 import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -101,6 +99,8 @@ public class HeartbeatProcessor extends AbstractService{
   //TODO rewrite to correlate with heartbeat frequency, hardcoded in agent as of now
   private long delay = 5000;
   private long period = 1000;
+  private long logIntervalSeconds = 30;
+  private long lastStatusMessage = 0;
 
   private int poolSize = 1;
 
@@ -179,12 +179,19 @@ public class HeartbeatProcessor extends AbstractService{
 
     @Override
     public void run() {
+
       while (shouldRun) {
         try {
+          long now = System.currentTimeMillis();
+          if ((now - lastStatusMessage) > (logIntervalSeconds * 1000)) {
+            LOG.info("Queue size: {}", heartBeatsQueue.size());
+            lastStatusMessage = now;
+          }
           HeartBeat heartbeat = pollHeartbeat();
           if (heartbeat == null) {
             break;
           }
+
           processHeartbeat(heartbeat);
         } catch (Exception e) {
           LOG.error("Exception received while processing heartbeat", e);
@@ -201,13 +208,13 @@ public class HeartbeatProcessor extends AbstractService{
   /**
   * Encapsulates logic for processing data from agent heartbeat
    * @param heartbeat Agent heartbeat object
    * @throws AmbariException
    */
   public void processHeartbeat(HeartBeat heartbeat) throws AmbariException {
     long now = System.currentTimeMillis();
 
     processAlerts(heartbeat);
-
     //process status reports before command reports to prevent status override immediately after task finish
     processStatusReports(heartbeat);
     processCommandReports(heartbeat, now);
@@ -215,8 +222,6 @@ public class HeartbeatProcessor extends AbstractService{
     processHostStatus(heartbeat);
   }
 
-
-
   /**
    * Extracts all of the {@link Alert}s from the heartbeat and fires
    * {@link AlertEvent}s for each one. If there is a problem looking up the

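The heartbeat-loop change above adds interval-throttled status logging: the queue size is reported at most once per logIntervalSeconds, however fast the loop spins. A compact Python model of that pacing (names are illustrative, not Ambari's):

import time

LOG_INTERVAL_SECONDS = 30
_last_status_message = 0.0

def maybe_log_queue_size(queue_size):
    global _last_status_message
    now = time.time()
    if now - _last_status_message > LOG_INTERVAL_SECONDS:
        print("INFO: Queue size: {0}".format(queue_size))
        _last_status_message = now

for _ in range(3):
    maybe_log_queue_size(42)  # only the first call in the window prints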

[10/41] ambari git commit: AMBARI-19080 Ambari stack-advisor error while adding knox service (mugdha)

Posted by jo...@apache.org.
AMBARI-19080 Ambari stack-advisor error while adding knox service (mugdha)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/149104c2
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/149104c2
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/149104c2

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 149104c2713f320116fc63daea0260f3eafca61b
Parents: a2f7a030
Author: Mugdha Varadkar <mu...@apache.org>
Authored: Tue Dec 6 14:13:59 2016 +0530
Committer: Mugdha Varadkar <mu...@apache.org>
Committed: Tue Dec 6 18:34:48 2016 +0530

----------------------------------------------------------------------
 .../stacks/HDP/2.3/services/stack_advisor.py    |  4 ++-
 .../stacks/2.3/common/test_stack_advisor.py     | 29 +++++++++++++++++++-
 2 files changed, 31 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/149104c2/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
index 5d34779..b6c83bd 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
@@ -768,7 +768,9 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
     knox_port = '8443'
     if 'KNOX' in servicesList:
       knox_hosts = self.getComponentHostNames(services, "KNOX", "KNOX_GATEWAY")
-      knox_host = knox_hosts[0]
+      if len(knox_hosts) > 0:
+        knox_hosts.sort()
+        knox_host = knox_hosts[0]
       if 'gateway-site' in services['configurations'] and 'gateway.port' in services['configurations']["gateway-site"]["properties"]:
         knox_port = services['configurations']["gateway-site"]["properties"]['gateway.port']
       putRangerAdminProperty('ranger.sso.providerurl', 'https://{0}:{1}/gateway/knoxsso/api/v1/websso'.format(knox_host, knox_port))

http://git-wip-us.apache.org/repos/asf/ambari/blob/149104c2/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
index b5ff24c..4a77086 100644
--- a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
@@ -1466,6 +1466,31 @@ class TestHDP23StackAdvisor(TestCase):
             }
           ]
         },
+        {
+          "href": "/api/v1/stacks/HDP/versions/2.3/services/KNOX",
+          "StackServices": {
+            "service_name": "KNOX",
+            "service_version": "0.9.0.2.3",
+            "stack_name": "HDP",
+            "stack_version": "2.3"
+          },
+          "components": [
+            {
+              "href": "/api/v1/stacks/HDP/versions/2.3/services/KNOX/components/KNOX_GATEWAY",
+              "StackServiceComponents": {
+                "advertise_version": "false",
+                "cardinality": "1+",
+                "component_category": "MASTER",
+                "component_name": "KNOX_GATEWAY",
+                "display_name": "Knox Gateway",
+                "is_client": "false",
+                "is_master": "true",
+                "hostnames": ["c6401.ambari.apache.org"]
+              },
+              "dependencies": []
+            }
+          ]
+        }
         ],
       "configurations": {
         "admin-properties": {
@@ -1477,6 +1502,7 @@ class TestHDP23StackAdvisor(TestCase):
           "properties": {
             "ranger.service.http.port": "7777",
             "ranger.service.http.enabled": "true",
+            "ranger.sso.providerurl": "",
             }
         }
       },
@@ -1519,7 +1545,8 @@ class TestHDP23StackAdvisor(TestCase):
       'ranger-admin-site': {
         'properties': {
           "ranger.audit.solr.zookeepers": "NONE",
-          "ranger.audit.source.type": "solr"
+          "ranger.audit.source.type": "solr",
+          "ranger.sso.providerurl": "https://c6401.ambari.apache.org:8443/gateway/knoxsso/api/v1/websso"
         }
       },
       'ranger-env': {

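Distilled, the stack-advisor fix above guards against an unmapped KNOX_GATEWAY (empty host list) and sorts the hosts so the chosen gateway is deterministic. A minimal sketch; the "localhost" fallback is an assumption for illustration:

def build_knox_sso_url(knox_hosts, knox_port="8443"):
    knox_host = "localhost"
    if len(knox_hosts) > 0:
        knox_hosts.sort()
        knox_host = knox_hosts[0]
    return "https://{0}:{1}/gateway/knoxsso/api/v1/websso".format(knox_host, knox_port)

print(build_knox_sso_url(["c6402.ambari.apache.org", "c6401.ambari.apache.org"]))
print(build_knox_sso_url([]))  # no IndexError, falls back to the default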

[15/41] ambari git commit: AMBARI-19107. False warning about NTP not running on SUSE12. (Attila Doroszlai via swagle)

Posted by jo...@apache.org.
AMBARI-19107. False warning about NTP not running on SUSE12. (Attila Doroszlai via swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/64188616
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/64188616
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/64188616

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 641886160877e4bc4c38e9d461967285d77bd0c3
Parents: 502d444
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Tue Dec 6 12:50:18 2016 -0800
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Tue Dec 6 12:50:18 2016 -0800

----------------------------------------------------------------------
 ambari-agent/src/main/python/ambari_agent/HostInfo.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/64188616/ambari-agent/src/main/python/ambari_agent/HostInfo.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/HostInfo.py b/ambari-agent/src/main/python/ambari_agent/HostInfo.py
index 338daac..3810b52 100644
--- a/ambari-agent/src/main/python/ambari_agent/HostInfo.py
+++ b/ambari-agent/src/main/python/ambari_agent/HostInfo.py
@@ -121,12 +121,12 @@ class HostInfo(object):
     return False
 
 def get_ntp_service():
-  if OSCheck.is_redhat_family() and int(OSCheck.get_os_major_version()) >= 7:
-    return ("chronyd", "ntpd",)
-  elif OSCheck.is_redhat_family():
-    return ("ntpd",)
-  elif OSCheck.is_suse_family() or OSCheck.is_ubuntu_family():
-    return ("ntp",)
+  if OSCheck.is_redhat_family():
+    return ("ntpd", "chronyd",)
+  elif OSCheck.is_suse_family():
+    return ("ntpd", "ntp",)
+  elif OSCheck.is_ubuntu_family():
+    return ("ntp", "chrony",)
 
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)

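The host check now treats several daemons per OS family as acceptable time-sync services. A sketch of how such tuples might be consumed, with is_service_running standing in for the agent's real service probe:

def get_ntp_service(os_family):
    return {"redhat": ("ntpd", "chronyd"),
            "suse": ("ntpd", "ntp"),
            "ubuntu": ("ntp", "chrony")}.get(os_family, ())

def ntp_ok(os_family, is_service_running):
    return any(is_service_running(name) for name in get_ntp_service(os_family))

print(ntp_ok("suse", lambda name: name == "ntp"))  # no false warning on SUSE12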

[35/41] ambari git commit: AMBARI-19129. Installer UI hangs when "Validation Issues" dialog is dismissed (alexantonenko)

Posted by jo...@apache.org.
AMBARI-19129. Installer UI hangs when "Validation Issues" dialog is dismissed (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4bc1a73f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4bc1a73f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4bc1a73f

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 4bc1a73f4c598b84b7a6ba368a5f7381abe5113b
Parents: cebdb67
Author: Alex Antonenko <hi...@gmail.com>
Authored: Wed Dec 7 17:34:04 2016 +0200
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Wed Dec 7 19:01:03 2016 +0200

----------------------------------------------------------------------
 ambari-web/app/mixins/wizard/assign_master_components.js | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4bc1a73f/ambari-web/app/mixins/wizard/assign_master_components.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/wizard/assign_master_components.js b/ambari-web/app/mixins/wizard/assign_master_components.js
index 7ef7fde..3651462 100644
--- a/ambari-web/app/mixins/wizard/assign_master_components.js
+++ b/ambari-web/app/mixins/wizard/assign_master_components.js
@@ -1202,6 +1202,7 @@ App.AssignMasterComponents = Em.Mixin.create(App.HostComponentValidationMixin, A
       },
       onSecondary: function () {
         this._super();
+        App.router.set('nextBtnClickInProgress', false);
         self.set('submitButtonClicked', false);
       },
       onClose: function () {


[14/41] ambari git commit: AMBARI-19112: Grafana start failing at create datasource step (and a minor AbstractUpgradeCatalog log line change).

Posted by jo...@apache.org.
AMBARI-19112: Grafana start failing at create datasource step (and a minor AbstractUpgradeCatalog log line change).


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/502d4447
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/502d4447
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/502d4447

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 502d4447fa7f78421e606cff83661b41c4923a61
Parents: 9b46fd2
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Tue Dec 6 12:07:38 2016 -0800
Committer: Aravindan Vijayan <av...@hortonworks.com>
Committed: Tue Dec 6 12:07:48 2016 -0800

----------------------------------------------------------------------
 .../apache/ambari/server/upgrade/AbstractUpgradeCatalog.java    | 4 ++--
 .../0.1.0/package/scripts/metrics_grafana_util.py               | 5 +++--
 2 files changed, 5 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/502d4447/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
index 7f91a68..4f6cbb7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
@@ -585,8 +585,8 @@ public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
         }
 
         if (!Maps.difference(oldConfigProperties, mergedProperties).areEqual()) {
-          LOG.info("Applying configuration with tag '{}' to " +
-            "cluster '{}'", newTag, cluster.getClusterName());
+          LOG.info("Applying configuration with tag '{}' and configType '{}' to " +
+            "cluster '{}'", newTag, configType, cluster.getClusterName());
 
           Map<String, Map<String, String>> propertiesAttributes = null;
           if (oldConfig != null) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/502d4447/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
index 6030fc4..41bd2b4 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/metrics_grafana_util.py
@@ -34,8 +34,8 @@ import ambari_simplejson as json
 import network
 import os
 
-GRAFANA_CONNECT_TRIES = 5
-GRAFANA_CONNECT_TIMEOUT = 10
+GRAFANA_CONNECT_TRIES = 15
+GRAFANA_CONNECT_TIMEOUT = 20
 GRAFANA_SEARCH_BUILTIN_DASHBOARDS = "/api/search?tag=builtin"
 GRAFANA_DATASOURCE_URL = "/api/datasources"
 GRAFANA_DASHBOARDS_URL = "/api/dashboards/db"
@@ -202,6 +202,7 @@ def do_ams_collector_post(metric_collector_host, params):
 
     post_metrics_to_collector(ams_metrics_post_url, metric_collector_host, params.metric_collector_port, params.metric_collector_https_enabled,
                                 metric_json, headers, ca_certs)
+
 def create_ams_datasource():
   import params
   server = Server(protocol = params.ams_grafana_protocol.strip(),

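Raising GRAFANA_CONNECT_TRIES and GRAFANA_CONNECT_TIMEOUT gives a slow-starting Grafana more time before datasource creation is abandoned. A reduced model of the pacing those constants control; the request itself is faked, only the retry logic is illustrated:

import time

GRAFANA_CONNECT_TRIES = 15
GRAFANA_CONNECT_TIMEOUT = 20

def create_datasource_with_retry(attempt_request, tries=GRAFANA_CONNECT_TRIES,
                                 timeout=GRAFANA_CONNECT_TIMEOUT):
    for i in range(tries):
        if attempt_request():
            return True
        if i < tries - 1:
            time.sleep(timeout)
    return False

print(create_datasource_with_retry(lambda: True))  # succeeds on the first try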

[24/41] ambari git commit: AMBARI-19099. Improve and Fix 'Wizard pages' after new guidelines. (xiwang)

Posted by jo...@apache.org.
AMBARI-19099. Improve and Fix 'Wizard pages' after new guidelines. (xiwang)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1e261db0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1e261db0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1e261db0

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 1e261db06128143a4e631aff899704cf12709413
Parents: a62119a
Author: Xi Wang <xi...@apache.org>
Authored: Mon Dec 5 16:01:52 2016 -0800
Committer: Xi Wang <xi...@apache.org>
Committed: Tue Dec 6 17:25:21 2016 -0800

----------------------------------------------------------------------
 .../app/styles/theme/bootstrap-ambari.css       | 25 +++++++++++---------
 ambari-web/app/styles/wizard.less               | 13 ++++++++--
 ambari-web/app/templates/wizard/step8.hbs       |  2 +-
 3 files changed, 26 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1e261db0/ambari-web/app/styles/theme/bootstrap-ambari.css
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/theme/bootstrap-ambari.css b/ambari-web/app/styles/theme/bootstrap-ambari.css
index 738fa59..5a641b3 100644
--- a/ambari-web/app/styles/theme/bootstrap-ambari.css
+++ b/ambari-web/app/styles/theme/bootstrap-ambari.css
@@ -510,19 +510,15 @@ h2.table-title {
   margin: 15px 20px;
 }
 .wizard .wizard-body {
-  background: #ebecf1;
-  /* Old browsers */
-  background: -moz-linear-gradient(to right, #323544, #323544 25%, #ebecf1 25%, #ebecf1);
-  /* FF3.6-15 */
-  background: -webkit-linear-gradient(to right, #323544, #323544 25%, #ebecf1 25%, #ebecf1);
-  /* Chrome10-25,Safari5.1-6 */
-  background: linear-gradient(to right, #323544, #323544 25%, #ebecf1 25%, #ebecf1);
-  /* W3C, IE10+, FF16+, Chrome26+, Opera12+, Safari7+ */
+  overflow: hidden;
   margin: 0;
 }
 .wizard .wizard-body .wizard-content {
   background: #ebecf1;
   padding-top: 25px;
+  float: left;
+  margin-bottom: -99999px;
+  padding-bottom: 99999px;
 }
 .wizard .wizard-body .wizard-content .step-title {
   font-family: 'Roboto', sans-serif;
@@ -554,6 +550,9 @@ h2.table-title {
   min-height: 650px;
   padding-top: 25px;
   background-color: #323544;
+  float: left;
+  margin-bottom: -99999px;
+  padding-bottom: 99999px;
 }
 .wizard .wizard-body .wizard-nav .nav li {
   padding: 10px 0;
@@ -588,6 +587,9 @@ h2.table-title {
   color: #bbbbbb;
   margin-left: 30px;
 }
+.wizard .wizard-body .wizard-nav .nav li .step-index {
+  line-height: 18px;
+}
 .wizard .wizard-body .wizard-nav .nav li .step-description {
   font-family: 'Roboto', sans-serif;
   font-weight: normal;
@@ -602,7 +604,7 @@ h2.table-title {
   background-color: #1EB475;
   color: white;
   font-size: 10px;
-  padding-left: 3px;
+  padding-left: 2px;
 }
 .wizard .wizard-body .wizard-nav .nav li.completed .step-marker .step-index {
   display: none;
@@ -634,8 +636,9 @@ h2.table-title {
   border-color: #bbbbbb;
 }
 .wizard .wizard-body .wizard-nav .nav li.disabled.completed .step-marker {
-  background-color: #bbbbbb;
-  color: #323544;
+  background-color: #1EB475;
+  border: 2px solid #1EB475;
+  color: white;
 }
 .wizard .wizard-body .wizard-nav .nav-pills > li.active > a,
 .wizard .wizard-body .wizard-nav .nav-pills > li.active > a:focus,

http://git-wip-us.apache.org/repos/asf/ambari/blob/1e261db0/ambari-web/app/styles/wizard.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/wizard.less b/ambari-web/app/styles/wizard.less
index 57dae45..9920319 100644
--- a/ambari-web/app/styles/wizard.less
+++ b/ambari-web/app/styles/wizard.less
@@ -36,7 +36,6 @@
     padding-right: 15px;
   }
   .btn-area {
-    margin-top: 20px;
     .btn.btn-info {
       margin-right: 10px;
     }
@@ -433,7 +432,7 @@
     color: #fff;
     cursor: pointer;
     font-weight: bold;
-    text-shadow: 0 1px 0 rgba(255, 255, 255, 0.5);
+    line-height: 16px;
     &:hover {
       background-color: @green;
     }
@@ -471,6 +470,12 @@
   ul {
     li {
       list-style: none;
+      span.text {
+        line-height: 1.5;
+      }
+      b.service-name {
+        line-height: 2;
+      }
     }
   }
   max-height: 440px;
@@ -490,6 +495,10 @@
   ul {
     li {
       list-style: none;
+      span.text-info,
+      span.text-success {
+        line-height: 1.5;
+      }
     }
   }
   max-height: 440px;

http://git-wip-us.apache.org/repos/asf/ambari/blob/1e261db0/ambari-web/app/templates/wizard/step8.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/wizard/step8.hbs b/ambari-web/app/templates/wizard/step8.hbs
index 5c2e8e6..9e50e30 100644
--- a/ambari-web/app/templates/wizard/step8.hbs
+++ b/ambari-web/app/templates/wizard/step8.hbs
@@ -63,7 +63,7 @@
               <ul>
                 {{#each controller.services}}
                   <li>
-                    <em><b>{{display_name}}</b></em>
+                    <em><b class="service-name">{{display_name}}</b></em>
                     <ul>
                       {{#each component in this.service_components}}
                         <li>


[29/41] ambari git commit: AMBARI-19089. Manage Journalnode Wizard is stuck at Add/Remove JournalNodes in Kerberized Cluster (akovalenko)

Posted by jo...@apache.org.
AMBARI-19089. Manage Journalnode Wizard is stuck at Add/Remove JournalNodes in Kerberized Cluster (akovalenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/49d08958
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/49d08958
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/49d08958

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 49d089586381f44882c57e0a5b8e029e775fe1de
Parents: 165c4d6
Author: Aleksandr Kovalenko <ak...@hortonworks.com>
Authored: Wed Dec 7 12:54:02 2016 +0200
Committer: Aleksandr Kovalenko <ak...@hortonworks.com>
Committed: Wed Dec 7 13:01:58 2016 +0200

----------------------------------------------------------------------
 .../admin/highAvailability/journalNode/step4_controller.js     | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/49d08958/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step4_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step4_controller.js b/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step4_controller.js
index 329b8fe..1938527 100644
--- a/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step4_controller.js
+++ b/ambari-web/app/controllers/main/admin/highAvailability/journalNode/step4_controller.js
@@ -60,11 +60,7 @@ App.ManageJournalNodeWizardStep4Controller = App.ManageJournalNodeProgressPageCo
 
   reconfigureHDFS: function () {
     var data = this.get('content.serviceConfigProperties');
-    if (App.get('isKerberosEnabled')) {
-      // TODO this.reconfigureSecureHDFS();
-    } else {
-      this.updateConfigProperties(data);
-    }
+    this.updateConfigProperties(data);
   },
 
   /**


[32/41] ambari git commit: AMBARI-19004 Do not cache 'No logs' for host components at the Ambari Server Hosts/Logs tab back end

Posted by jo...@apache.org.
AMBARI-19004 Do not cache 'No logs' for host components at the Ambari Server Hosts/Logs tab back end

Change-Id: Ie9e8214fe4f2e8a8d86b3229193883dab8345344


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9e1318ec
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9e1318ec
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9e1318ec

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 9e1318ecc2e95c72cc0980cd4d8da1c04210b46b
Parents: 234838f
Author: Miklos Gergely <mg...@hortonworks.com>
Authored: Wed Dec 7 16:29:47 2016 +0100
Committer: Miklos Gergely <mg...@hortonworks.com>
Committed: Wed Dec 7 16:35:04 2016 +0100

----------------------------------------------------------------------
 .../logging/LogSearchDataRetrievalService.java  |   7 +-
 .../LogSearchDataRetrievalServiceTest.java      | 142 +++++++++----------
 2 files changed, 73 insertions(+), 76 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9e1318ec/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalService.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalService.java
index 1c135b2..5c0bdb1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalService.java
@@ -23,9 +23,9 @@ import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.AbstractService;
 import com.google.inject.Inject;
 import org.apache.ambari.server.AmbariService;
-import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.AmbariServer;
+import org.apache.commons.collections.CollectionUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -64,9 +64,6 @@ public class LogSearchDataRetrievalService extends AbstractService {
   private static Logger LOG = LoggerFactory.getLogger(LogSearchDataRetrievalService.class);
 
   @Inject
-  private Configuration configuration;
-
-  @Inject
   private LoggingRequestHelperFactory loggingRequestHelperFactory;
 
   /**
@@ -288,7 +285,7 @@ public class LogSearchDataRetrievalService extends AbstractService {
             helper.sendGetLogFileNamesRequest(component, host);
 
           // update the cache if result is available
-          if (logFileNamesResult != null) {
+          if (CollectionUtils.isNotEmpty(logFileNamesResult)) {
             LOG.debug("LogSearchFileNameRequestRunnable: request was successful, updating cache");
             final String key = generateKey(component, host);
             // update cache with returned result

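Restated in Python, the service-side change above writes a result set to the cache only when it is non-empty, so a transient "no logs yet" answer is retried instead of pinned. The dict stands in for the Guava cache:

cache = {}

def update_cache(component, host, log_file_names):
    key = "{0}+{1}".format(component, host)
    if log_file_names:  # mirrors CollectionUtils.isNotEmpty
        cache[key] = set(log_file_names)

update_cache("DATANODE", "c6401.ambari.apache.org", [])            # not cached
update_cache("DATANODE", "c6401.ambari.apache.org", ["hdfs.log"])  # cached
print(cache)
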
http://git-wip-us.apache.org/repos/asf/ambari/blob/9e1318ec/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalServiceTest.java
index b58350b..a36ce0d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/logging/LogSearchDataRetrievalServiceTest.java
@@ -23,7 +23,6 @@ import org.easymock.EasyMockSupport;
 import org.junit.Test;
 
 import java.util.Collections;
-import java.util.HashSet;
 import java.util.Set;
 import java.util.concurrent.Executor;
 
@@ -51,8 +50,7 @@ public class LogSearchDataRetrievalServiceTest {
 
     EasyMockSupport mockSupport = new EasyMockSupport();
 
-    LoggingRequestHelperFactory helperFactoryMock =
-      mockSupport.createMock(LoggingRequestHelperFactory.class);
+    LoggingRequestHelperFactory helperFactoryMock = mockSupport.createMock(LoggingRequestHelperFactory.class);
 
     LoggingRequestHelper helperMock =
       mockSupport.createMock(LoggingRequestHelper.class);
@@ -62,8 +60,7 @@ public class LogSearchDataRetrievalServiceTest {
 
     mockSupport.replayAll();
 
-    LogSearchDataRetrievalService retrievalService =
-      new LogSearchDataRetrievalService();
+    LogSearchDataRetrievalService retrievalService = new LogSearchDataRetrievalService();
     retrievalService.setLoggingRequestHelperFactory(helperFactoryMock);
     // call the initialization routine called by the Google framework
     retrievalService.doStart();
@@ -71,8 +68,7 @@ public class LogSearchDataRetrievalServiceTest {
     String resultTailFileURI =
       retrievalService.getLogFileTailURI("http://localhost", expectedComponentName, expectedHostName, expectedClusterName);
 
-    assertEquals("TailFileURI was not returned as expected",
-                 expectedResultURI, resultTailFileURI);
+    assertEquals("TailFileURI was not returned as expected", expectedResultURI, resultTailFileURI);
 
     mockSupport.verifyAll();
   }
@@ -85,8 +81,7 @@ public class LogSearchDataRetrievalServiceTest {
 
     EasyMockSupport mockSupport = new EasyMockSupport();
 
-    LoggingRequestHelperFactory helperFactoryMock =
-      mockSupport.createMock(LoggingRequestHelperFactory.class);
+    LoggingRequestHelperFactory helperFactoryMock = mockSupport.createMock(LoggingRequestHelperFactory.class);
 
     // return null, to simulate the case where LogSearch Server is
     // not available for some reason
@@ -94,8 +89,7 @@ public class LogSearchDataRetrievalServiceTest {
 
     mockSupport.replayAll();
 
-    LogSearchDataRetrievalService retrievalService =
-      new LogSearchDataRetrievalService();
+    LogSearchDataRetrievalService retrievalService = new LogSearchDataRetrievalService();
     retrievalService.setLoggingRequestHelperFactory(helperFactoryMock);
     // call the initialization routine called by the Google framework
     retrievalService.doStart();
@@ -103,11 +97,9 @@ public class LogSearchDataRetrievalServiceTest {
     String resultTailFileURI =
       retrievalService.getLogFileTailURI("http://localhost", expectedComponentName, expectedHostName, expectedClusterName);
 
-    assertNull("TailFileURI should be null in this case",
-               resultTailFileURI);
+    assertNull("TailFileURI should be null in this case", resultTailFileURI);
 
     mockSupport.verifyAll();
-
   }
 
   @Test
@@ -118,11 +110,9 @@ public class LogSearchDataRetrievalServiceTest {
 
     EasyMockSupport mockSupport = new EasyMockSupport();
 
-    LoggingRequestHelperFactory helperFactoryMock =
-      mockSupport.createMock(LoggingRequestHelperFactory.class);
+    LoggingRequestHelperFactory helperFactoryMock = mockSupport.createMock(LoggingRequestHelperFactory.class);
 
-    Executor executorMock =
-      mockSupport.createMock(Executor.class);
+    Executor executorMock = mockSupport.createMock(Executor.class);
 
     // expect the executor to be called to execute the LogSearch request
     executorMock.execute(isA(LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable.class));
@@ -131,24 +121,19 @@ public class LogSearchDataRetrievalServiceTest {
 
     mockSupport.replayAll();
 
-    LogSearchDataRetrievalService retrievalService =
-      new LogSearchDataRetrievalService();
+    LogSearchDataRetrievalService retrievalService = new LogSearchDataRetrievalService();
     retrievalService.setLoggingRequestHelperFactory(helperFactoryMock);
     // call the initialization routine called by the Google framework
     retrievalService.doStart();
     retrievalService.setExecutor(executorMock);
 
 
-    assertEquals("Default request set should be empty",
-                 0, retrievalService.getCurrentRequests().size());
+    assertEquals("Default request set should be empty", 0, retrievalService.getCurrentRequests().size());
 
-    Set<String> resultSet =
-      retrievalService.getLogFileNames(expectedComponentName, expectedHostName, expectedClusterName);
+    Set<String> resultSet = retrievalService.getLogFileNames(expectedComponentName, expectedHostName, expectedClusterName);
 
-    assertNull("Inital query on the retrieval service should be null, since cache is empty by default",
-                resultSet);
-    assertEquals("Incorrect number of entries in the current request set",
-                 1, retrievalService.getCurrentRequests().size());
+    assertNull("Initial query on the retrieval service should be null, since cache is empty by default", resultSet);
+    assertEquals("Incorrect number of entries in the current request set", 1, retrievalService.getCurrentRequests().size());
     assertTrue("Incorrect HostComponent set on request set",
                 retrievalService.getCurrentRequests().contains(expectedComponentName + "+" + expectedHostName));
 
@@ -163,16 +148,13 @@ public class LogSearchDataRetrievalServiceTest {
 
     EasyMockSupport mockSupport = new EasyMockSupport();
 
-    LoggingRequestHelperFactory helperFactoryMock =
-      mockSupport.createMock(LoggingRequestHelperFactory.class);
+    LoggingRequestHelperFactory helperFactoryMock = mockSupport.createMock(LoggingRequestHelperFactory.class);
 
-    Executor executorMock =
-      mockSupport.createMock(Executor.class);
+    Executor executorMock = mockSupport.createMock(Executor.class);
 
     mockSupport.replayAll();
 
-    LogSearchDataRetrievalService retrievalService =
-      new LogSearchDataRetrievalService();
+    LogSearchDataRetrievalService retrievalService = new LogSearchDataRetrievalService();
     retrievalService.setLoggingRequestHelperFactory(helperFactoryMock);
     // call the initialization routine called by the Google framework
     retrievalService.doStart();
@@ -184,16 +166,15 @@ public class LogSearchDataRetrievalServiceTest {
     // but is not yet completed.
     retrievalService.getCurrentRequests().add(expectedComponentName + "+" + expectedHostName);
 
-    Set<String> resultSet =
-      retrievalService.getLogFileNames(expectedComponentName, expectedHostName, expectedClusterName);
+    Set<String> resultSet = retrievalService.getLogFileNames(expectedComponentName, expectedHostName, expectedClusterName);
 
-    assertNull("Inital query on the retrieval service should be null, since cache is empty by default",
-      resultSet);
+    assertNull("Initial query on the retrieval service should be null, since cache is empty by default", resultSet);
 
     mockSupport.verifyAll();
   }
 
   @Test
+  @SuppressWarnings("unchecked")
   public void testRunnableWithSuccessfulCall() throws Exception {
     final String expectedHostName = "c6401.ambari.apache.org";
     final String expectedComponentName = "DATANODE";
@@ -202,17 +183,12 @@ public class LogSearchDataRetrievalServiceTest {
 
     EasyMockSupport mockSupport = new EasyMockSupport();
 
-    LoggingRequestHelperFactory helperFactoryMock =
-      mockSupport.createMock(LoggingRequestHelperFactory.class);
-    AmbariManagementController controllerMock =
-      mockSupport.createMock(AmbariManagementController.class);
-    LoggingRequestHelper helperMock =
-      mockSupport.createMock(LoggingRequestHelper.class);
+    LoggingRequestHelperFactory helperFactoryMock = mockSupport.createMock(LoggingRequestHelperFactory.class);
+    AmbariManagementController controllerMock = mockSupport.createMock(AmbariManagementController.class);
+    LoggingRequestHelper helperMock = mockSupport.createMock(LoggingRequestHelper.class);
 
-    Cache cacheMock =
-      mockSupport.createMock(Cache.class);
-    Set currentRequestsMock =
-      mockSupport.createMock(Set.class);
+    Cache<String, Set<String>> cacheMock = mockSupport.createMock(Cache.class);
+    Set<String> currentRequestsMock = mockSupport.createMock(Set.class);
 
     expect(helperFactoryMock.getHelper(controllerMock, expectedClusterName)).andReturn(helperMock);
     expect(helperMock.sendGetLogFileNamesRequest(expectedComponentName, expectedHostName)).andReturn(Collections.singleton("/this/is/just/a/test/directory"));
@@ -225,14 +201,14 @@ public class LogSearchDataRetrievalServiceTest {
 
     LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable loggingRunnable =
       new LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable(expectedHostName, expectedComponentName, expectedClusterName,
-                                                                         cacheMock, currentRequestsMock, helperFactoryMock, controllerMock);
+          cacheMock, currentRequestsMock, helperFactoryMock, controllerMock);
     loggingRunnable.run();
 
     mockSupport.verifyAll();
-
   }
 
   @Test
+  @SuppressWarnings("unchecked")
   public void testRunnableWithFailedCallNullHelper() throws Exception {
     final String expectedHostName = "c6401.ambari.apache.org";
     final String expectedComponentName = "DATANODE";
@@ -241,15 +217,11 @@ public class LogSearchDataRetrievalServiceTest {
 
     EasyMockSupport mockSupport = new EasyMockSupport();
 
-    LoggingRequestHelperFactory helperFactoryMock =
-      mockSupport.createMock(LoggingRequestHelperFactory.class);
-    AmbariManagementController controllerMock =
-      mockSupport.createMock(AmbariManagementController.class);
+    LoggingRequestHelperFactory helperFactoryMock = mockSupport.createMock(LoggingRequestHelperFactory.class);
+    AmbariManagementController controllerMock = mockSupport.createMock(AmbariManagementController.class);
 
-    Cache cacheMock =
-      mockSupport.createMock(Cache.class);
-    Set currentRequestsMock =
-      mockSupport.createMock(Set.class);
+    Cache<String, Set<String>> cacheMock = mockSupport.createMock(Cache.class);
+    Set<String> currentRequestsMock = mockSupport.createMock(Set.class);
 
     // return null to simulate an error during helper instance creation
     expect(helperFactoryMock.getHelper(controllerMock, expectedClusterName)).andReturn(null);
@@ -261,7 +233,7 @@ public class LogSearchDataRetrievalServiceTest {
 
     LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable loggingRunnable =
       new LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable(expectedHostName, expectedComponentName, expectedClusterName,
-        cacheMock, currentRequestsMock, helperFactoryMock, controllerMock);
+          cacheMock, currentRequestsMock, helperFactoryMock, controllerMock);
     loggingRunnable.run();
 
     mockSupport.verifyAll();
@@ -269,6 +241,7 @@ public class LogSearchDataRetrievalServiceTest {
   }
 
   @Test
+  @SuppressWarnings("unchecked")
   public void testRunnableWithFailedCallNullResult() throws Exception {
     final String expectedHostName = "c6401.ambari.apache.org";
     final String expectedComponentName = "DATANODE";
@@ -277,17 +250,12 @@ public class LogSearchDataRetrievalServiceTest {
 
     EasyMockSupport mockSupport = new EasyMockSupport();
 
-    LoggingRequestHelperFactory helperFactoryMock =
-      mockSupport.createMock(LoggingRequestHelperFactory.class);
-    AmbariManagementController controllerMock =
-      mockSupport.createMock(AmbariManagementController.class);
-    LoggingRequestHelper helperMock =
-      mockSupport.createMock(LoggingRequestHelper.class);
+    LoggingRequestHelperFactory helperFactoryMock = mockSupport.createMock(LoggingRequestHelperFactory.class);
+    AmbariManagementController controllerMock = mockSupport.createMock(AmbariManagementController.class);
+    LoggingRequestHelper helperMock = mockSupport.createMock(LoggingRequestHelper.class);
 
-    Cache cacheMock =
-      mockSupport.createMock(Cache.class);
-    Set currentRequestsMock =
-      mockSupport.createMock(Set.class);
+    Cache<String, Set<String>> cacheMock = mockSupport.createMock(Cache.class);
+    Set<String> currentRequestsMock = mockSupport.createMock(Set.class);
 
     expect(helperFactoryMock.getHelper(controllerMock, expectedClusterName)).andReturn(helperMock);
     // return null to simulate an error occurring during the LogSearch data request
@@ -300,11 +268,43 @@ public class LogSearchDataRetrievalServiceTest {
 
     LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable loggingRunnable =
       new LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable(expectedHostName, expectedComponentName, expectedClusterName,
-        cacheMock, currentRequestsMock, helperFactoryMock, controllerMock);
+          cacheMock, currentRequestsMock, helperFactoryMock, controllerMock);
     loggingRunnable.run();
 
     mockSupport.verifyAll();
-
   }
 
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testRunnableWithFailedCallEmptyResult() throws Exception {
+    final String expectedHostName = "c6401.ambari.apache.org";
+    final String expectedComponentName = "DATANODE";
+    final String expectedClusterName = "clusterone";
+    final String expectedComponentAndHostName = expectedComponentName + "+" + expectedHostName;
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    LoggingRequestHelperFactory helperFactoryMock = mockSupport.createMock(LoggingRequestHelperFactory.class);
+    AmbariManagementController controllerMock = mockSupport.createMock(AmbariManagementController.class);
+    LoggingRequestHelper helperMock = mockSupport.createMock(LoggingRequestHelper.class);
+
+    Cache<String, Set<String>> cacheMock = mockSupport.createMock(Cache.class);
+    Set<String> currentRequestsMock = mockSupport.createMock(Set.class);
+
+    expect(helperFactoryMock.getHelper(controllerMock, expectedClusterName)).andReturn(helperMock);
+    // return an empty set to simulate an error occurring during the LogSearch data request
+    expect(helperMock.sendGetLogFileNamesRequest(expectedComponentName, expectedHostName)).andReturn(Collections.EMPTY_SET);
+    // expect that the completed request is removed from the current request set,
+    // even in the event of a failure to obtain the LogSearch data
+    expect(currentRequestsMock.remove(expectedComponentAndHostName)).andReturn(true).once();
+
+    mockSupport.replayAll();
+
+    LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable loggingRunnable =
+      new LogSearchDataRetrievalService.LogSearchFileNameRequestRunnable(expectedHostName, expectedComponentName, expectedClusterName,
+          cacheMock, currentRequestsMock, helperFactoryMock, controllerMock);
+    loggingRunnable.run();
+
+    mockSupport.verifyAll();
+  }
 }


[18/41] ambari git commit: AMBARI-19091. HDP 3.0 TP - bootstrap the stack with metainfo, cluster-env, repos, and hooks (alejandro)

Posted by jo...@apache.org.
AMBARI-19091. HDP 3.0 TP - bootstrap the stack with metainfo, cluster-env, repos, and hooks (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/757267fc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/757267fc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/757267fc

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 757267fc6c62dff71aeb162976de39278d40aa04
Parents: 9a938e6
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Fri Dec 2 13:26:49 2016 -0800
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Tue Dec 6 16:51:28 2016 -0800

----------------------------------------------------------------------
 .../HDP/3.0/configuration/cluster-env.xml       | 293 +++++++++++++++++
 .../HDP/3.0/hooks/after-INSTALL/scripts/hook.py |  37 +++
 .../3.0/hooks/after-INSTALL/scripts/params.py   |  97 ++++++
 .../scripts/shared_initialization.py            | 111 +++++++
 .../hooks/before-ANY/files/changeToSecureUid.sh |  53 +++
 .../HDP/3.0/hooks/before-ANY/scripts/hook.py    |  36 ++
 .../HDP/3.0/hooks/before-ANY/scripts/params.py  | 231 +++++++++++++
 .../before-ANY/scripts/shared_initialization.py | 226 +++++++++++++
 .../3.0/hooks/before-INSTALL/scripts/hook.py    |  37 +++
 .../3.0/hooks/before-INSTALL/scripts/params.py  | 113 +++++++
 .../scripts/repo_initialization.py              |  68 ++++
 .../scripts/shared_initialization.py            |  37 +++
 .../3.0/hooks/before-RESTART/scripts/hook.py    |  29 ++
 .../hooks/before-START/files/checkForFormat.sh  |  65 ++++
 .../before-START/files/fast-hdfs-resource.jar   | Bin 0 -> 19285850 bytes
 .../before-START/files/task-log4j.properties    | 134 ++++++++
 .../hooks/before-START/files/topology_script.py |  66 ++++
 .../HDP/3.0/hooks/before-START/scripts/hook.py  |  39 +++
 .../3.0/hooks/before-START/scripts/params.py    | 326 +++++++++++++++++++
 .../before-START/scripts/rack_awareness.py      |  47 +++
 .../scripts/shared_initialization.py            | 191 +++++++++++
 .../templates/commons-logging.properties.j2     |  43 +++
 .../templates/exclude_hosts_list.j2             |  21 ++
 .../templates/hadoop-metrics2.properties.j2     | 105 ++++++
 .../before-START/templates/health_check.j2      |  81 +++++
 .../templates/include_hosts_list.j2             |  21 ++
 .../templates/topology_mappings.data.j2         |  24 ++
 .../main/resources/stacks/HDP/3.0/kerberos.json |  78 +++++
 .../main/resources/stacks/HDP/3.0/metainfo.xml  |  24 ++
 .../HDP/3.0/properties/stack_features.json      | 323 ++++++++++++++++++
 .../stacks/HDP/3.0/properties/stack_tools.json  |   4 +
 .../resources/stacks/HDP/3.0/repos/repoinfo.xml | 132 ++++++++
 .../main/resources/stacks/HDP/3.0/widgets.json  |  95 ++++++
 33 files changed, 3187 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
new file mode 100644
index 0000000..93680bf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/configuration/cluster-env.xml
@@ -0,0 +1,293 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>recovery_enabled</name>
+    <value>true</value>
+    <description>Whether auto start is enabled for this cluster.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>recovery_type</name>
+    <value>AUTO_START</value>
+    <description>Auto start type.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>recovery_lifetime_max_count</name>
+    <value>1024</value>
+    <description>Maximum lifetime count of auto start recovery attempts allowed per host component. This is reset when the agent is restarted.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>recovery_max_count</name>
+    <value>6</value>
+    <description>Maximum count of auto start recovery attempts allowed per host component within a window. This is reset when the agent is restarted.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>recovery_window_in_minutes</name>
+    <value>60</value>
+    <description>Auto start recovery window size in minutes.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>recovery_retry_interval</name>
+    <value>5</value>
+    <description>Gap between auto start recovery retries per host component.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ignore_groupsusers_create</name>
+    <display-name>Skip group modifications during install</display-name>
+    <value>false</value>
+    <property-type>ADDITIONAL_USER_PROPERTY</property-type>
+    <description>Whether to ignore failures during user and group creation</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>sysprep_skip_create_users_and_groups</name>
+    <display-name>Whether to skip creating users and groups in a sysprepped cluster</display-name>
+    <value>false</value>
+    <property-type>ADDITIONAL_USER_PROPERTY</property-type>
+    <description>Whether to skip creating users and groups in a sysprepped cluster</description>
+    <value-attributes>
+      <overridable>true</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>sysprep_skip_copy_fast_jar_hdfs</name>
+    <display-name>Whether to skip copying the tarballs to HDFS on a sysprepped cluster</display-name>
+    <value>false</value>
+    <description>Whether to skip copying the tarballs to HDFS on a sysprepped cluster, during both fresh install and stack upgrade</description>
+    <value-attributes>
+      <overridable>true</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>sysprep_skip_copy_tarballs_hdfs</name>
+    <display-name>Whether to skip copying the tarballs to HDFS on a sysprepped cluster</display-name>
+    <value>false</value>
+    <description>Whether to skip copying the tarballs to HDFS on a sysprepped cluster, during both fresh install and stack upgrade</description>
+    <value-attributes>
+      <overridable>true</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>sysprep_skip_copy_oozie_share_lib_to_hdfs</name>
+    <display-name>Whether to skip copying the Oozie share lib to HDFS on sysprepped cluster</display-name>
+    <value>false</value>
+    <description>Whether to skip copying the Oozie share lib to HDFS on sysprepped cluster, during both fresh install and stack upgrade</description>
+    <value-attributes>
+      <overridable>true</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>smokeuser</name>
+    <display-name>Smoke User</display-name>
+    <value>ambari-qa</value>
+    <property-type>USER</property-type>
+    <description>User executing service checks</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>smokeuser_keytab</name>
+    <value>/etc/security/keytabs/smokeuser.headless.keytab</value>
+    <description>Path to smoke test user keytab file</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>user_group</name>
+    <display-name>Hadoop Group</display-name>
+    <value>hadoop</value>
+    <property-type>GROUP</property-type>
+    <description>Hadoop user group.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>repo_suse_rhel_template</name>
+    <value>[{{repo_id}}]
+name={{repo_id}}
+{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}
+
+path=/
+enabled=1
+gpgcheck=0</value>
+    <description>Template of repositories for rhel and suse.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>repo_ubuntu_template</name>
+    <value>{{package_type}} {{base_url}} {{components}}</value>
+    <description>Template of repositories for ubuntu.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>override_uid</name>
+    <value>true</value>
+    <property-type>ADDITIONAL_USER_PROPERTY</property-type>
+    <display-name>Have Ambari manage UIDs</display-name>
+    <description>Have Ambari manage UIDs</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>fetch_nonlocal_groups</name>
+    <value>true</value>
+    <display-name>Ambari fetch nonlocal groups</display-name>
+    <description>Ambari requires fetching all the groups. This can be slow
+        on environments with LDAP enabled. Setting this option to false allows Ambari
+        to skip user/group management connected with LDAP groups.</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>managed_hdfs_resource_property_names</name>
+    <value/>
+    <description>Comma separated list of property names with HDFS resource paths.
+        Resources from this list will be managed even if they are marked as not managed in the stack</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
+  <property>
+    <name>stack_tools</name>
+    <value/>
+    <description>Stack specific tools</description>
+    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
+    <value-attributes>
+      <property-file-name>stack_tools.json</property-file-name>
+      <property-file-type>json</property-file-type>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- Define stack_features property in the base stack. DO NOT override this property for each stack version -->
+  <property>
+    <name>stack_features</name>
+    <value/>
+    <description>List of features supported by the stack</description>
+    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
+    <value-attributes>
+      <property-file-name>stack_features.json</property-file-name>
+      <property-file-type>json</property-file-type>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>stack_root</name>
+    <value>/usr/hdp</value>
+    <description>Stack root folder</description>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>alerts_repeat_tolerance</name>
+    <value>1</value>
+    <description>The number of consecutive alerts required to transition an alert from the SOFT to the HARD state.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ignore_bad_mounts</name>
+    <value>false</value>
+    <description>For properties handled by handle_mounted_dirs this will make Ambari not create any directories.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>manage_dirs_on_root</name>
+    <value>true</value>
+    <description>For properties handled by handle_mounted_dirs this will make Ambari manage (create and set permissions on) unknown directories on the / partition</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>one_dir_per_partition</name>
+    <value>false</value>
+    <description>For properties handled by handle_mounted_dirs this will make Ambari use at most one directory per partition</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hide_yarn_memory_widget</name>
+    <value>false</value>
+    <description>Whether the YARN Memory widget should be hidden by default on the dashboard.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>agent_mounts_ignore_list</name>
+    <value/>
+    <description>Comma separated list of the mounts which will be ignored by Ambari during property value suggestion by Stack Advisor</description>
+    <on-ambari-upgrade add="false"/>
+    <value-attributes>
+      <visible>true</visible>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+</configuration>
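
Side note: repo_suse_rhel_template above is a Jinja-style template that gets rendered into a yum .repo stanza. A minimal sketch of that rendering using jinja2, with made-up sample values for repo_id and base_url (mirror_list left unset so the template falls through to the baseurl branch):

# Illustration only: render repo_suse_rhel_template with hypothetical sample values.
from jinja2 import Template

repo_template = """[{{repo_id}}]
name={{repo_id}}
{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}

path=/
enabled=1
gpgcheck=0"""

print(Template(repo_template).render(repo_id="HDP-3.0",
                                     base_url="http://example.com/hdp/3.x/updates",
                                     mirror_list=None))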

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py
new file mode 100644
index 0000000..8a583b3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.hook import Hook
+from shared_initialization import link_configs
+from shared_initialization import setup_config
+from shared_initialization import setup_stack_symlinks
+
+class AfterInstallHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    env.set_params(params)
+    setup_stack_symlinks()
+    setup_config()
+
+    link_configs(self.stroutfile)
+
+if __name__ == "__main__":
+  AfterInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py
new file mode 100644
index 0000000..566f5b3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py
@@ -0,0 +1,97 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions.version import format_stack_version
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+dfs_type = default("/commandParams/dfs_type", "")
+stack_root = Script.get_stack_root()
+
+is_parallel_execution_enabled = int(default("/agentConfigParams/agent/parallel_execution", 0)) == 1
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+sudo = AMBARI_SUDO_BINARY
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+# current host stack version
+current_version = default("/hostLevelParams/current_version", None)
+
+# default hadoop params
+mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_conf_empty_dir = None
+
+versioned_stack_root = format('{stack_root}/current')
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#java params
+java_home = config['hostLevelParams']['java_home']
+
+#hadoop params
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+#users and groups
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+user_group = config['configurations']['cluster-env']['user_group']
+
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+has_namenode = not len(namenode_host) == 0
+
+if has_namenode or dfs_type == 'HCFS':
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+
+link_configs_lock_file = os.path.join(tmp_dir, "link_configs_lock_file")
+stack_select_lock_file = os.path.join(tmp_dir, "stack_select_lock_file")
+
+upgrade_suspended = default("/roleParams/upgrade_suspended", False)
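
Side note: the optional lookups above go through resource_management's default(path, fallback), which resolves an "/a/b/c" style path against the command JSON and returns the fallback when any key along the path is missing. A rough standalone equivalent for illustration only (the real helper reads the global command configuration instead of taking it as an argument):

def default_lookup(config, path, fallback=None):
    # Walk an "/a/b/c" style path through nested dicts; fall back when absent.
    node = config
    for key in path.strip("/").split("/"):
        if not isinstance(node, dict) or key not in node:
            return fallback
        node = node[key]
    return node

command_json = {"commandParams": {"dfs_type": "HDFS"}}  # hypothetical command JSON
print(default_lookup(command_json, "/commandParams/dfs_type", ""))            # HDFS
print(default_lookup(command_json, "/roleParams/upgrade_suspended", False))   # False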

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py
new file mode 100644
index 0000000..e9f2283
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py
@@ -0,0 +1,111 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+import ambari_simplejson as json
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.script import Script
+
+
+def setup_stack_symlinks():
+  """
+  Invokes <stack-selector-tool> set all against a calculated fully-qualified, "normalized" version based on a
+  stack version, such as "2.3". This should always be called after a component has been
+  installed to ensure that all HDP pointers are correct. The stack upgrade logic does not
+  interact with this since it's done via a custom command and will not trigger this hook.
+  :return:
+  """
+  import params
+  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
+    # try using the exact version first, falling back to just the stack if it's not defined,
+    # which would only be during an initial cluster installation
+    version = params.current_version if params.current_version is not None else params.stack_version_unformatted
+
+    if not params.upgrade_suspended:
+      if params.host_sys_prepped:
+        Logger.warning("Skipping running stack-selector-tool for stack {0} as its a sys_prepped host. This may cause symlink pointers not to be created for HDP componets installed later on top of an already sys_prepped host.".format(version))
+        return
+      # On parallel command execution this should be executed by a single process at a time.
+      with FcntlBasedProcessLock(params.stack_select_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
+        stack_select.select_all(version)
+
+def setup_config():
+  import params
+  stackversion = params.stack_version_unformatted
+  Logger.info("FS Type: {0}".format(params.dfs_type))
+
+  is_hadoop_conf_dir_present = False
+  if hasattr(params, "hadoop_conf_dir") and params.hadoop_conf_dir is not None and os.path.exists(params.hadoop_conf_dir):
+    is_hadoop_conf_dir_present = True
+  else:
+    Logger.warning("Parameter hadoop_conf_dir is missing or directory does not exist. This is expected if this host does not have any Hadoop components.")
+
+  if is_hadoop_conf_dir_present and (params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'):
+    # create core-site only if the hadoop config directory exists
+    XmlConfig("core-site.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['core-site'],
+              configuration_attributes=params.config['configuration_attributes']['core-site'],
+              owner=params.hdfs_user,
+              group=params.user_group,
+              only_if=format("ls {hadoop_conf_dir}"))
+
+
+def load_version(struct_out_file):
+  """
+  Load version from file.  Made a separate method for testing
+  """
+  json_version = None
+  try:
+    if os.path.exists(struct_out_file):
+      with open(struct_out_file, 'r') as fp:
+        json_info = json.load(fp)
+        json_version = json_info['version']
+  except:
+    pass
+
+  return json_version
+  
+
+def link_configs(struct_out_file):
+  """
+  Links configs, only on a fresh install of HDP-2.3 and higher
+  """
+  import params
+
+  if not Script.is_stack_greater_or_equal("2.3"):
+    Logger.info("Can only link configs for HDP-2.3 and higher.")
+    return
+
+  json_version = load_version(struct_out_file)
+
+  if not json_version:
+    Logger.info("Could not load 'version' from {0}".format(struct_out_file))
+    return
+
+  # On parallel command execution this should be executed by a single process at a time.
+  with FcntlBasedProcessLock(params.link_configs_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
+    for k, v in conf_select.get_package_dirs().iteritems():
+      conf_select.convert_conf_directories_to_symlinks(k, json_version, v)
\ No newline at end of file
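
Side note: both setup_stack_symlinks() and link_configs() above wrap their work in FcntlBasedProcessLock so that, when the agent executes commands in parallel, only one process performs the selection or symlinking at a time. A minimal sketch of that locking idea using only the standard library (this is not the actual resource_management class):

import fcntl
from contextlib import contextmanager

@contextmanager
def process_lock(lock_file_path):
    # Hold an exclusive advisory lock for the duration of the block;
    # any other process entering the same block waits until it is released.
    with open(lock_file_path, "a") as fp:
        fcntl.flock(fp, fcntl.LOCK_EX)
        try:
            yield
        finally:
            fcntl.flock(fp, fcntl.LOCK_UN)

# Usage sketch (path is hypothetical):
# with process_lock("/var/lib/ambari-agent/tmp/link_configs_lock_file"):
#     ...  # critical section runs in one process at a time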

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh
new file mode 100644
index 0000000..08542c4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+username=$1
+directories=$2
+
+function find_available_uid() {
+ for ((i=1001; i<=2000; i++))
+ do
+   grep -q $i /etc/passwd
+   if [ "$?" -ne 0 ]
+   then
+    newUid=$i
+    break
+   fi
+ done
+}
+
+find_available_uid
+
+if [ -z "$newUid" ]
+then
+  echo "Failed to find available uid between 1001 and 2000"
+  exit 1
+fi
+
+set -e
+
+dir_array=($(echo $directories | sed 's/,/\n/g'))
+old_uid=$(id -u $username)
+sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
+echo "Changing uid of $username from $old_uid to $newUid"
+echo "Changing directory permisions for ${dir_array[@]}"
+$sudo_prefix usermod -u $newUid $username && for dir in ${dir_array[@]} ; do ls $dir 2> /dev/null && echo "Changing permission for $dir" && $sudo_prefix chown -Rh $newUid $dir ; done
+exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py
new file mode 100644
index 0000000..c34be0b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py
@@ -0,0 +1,36 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from shared_initialization import *
+
+class BeforeAnyHook(Hook):
+
+  def hook(self, env):
+    import params
+    env.set_params(params)
+
+    setup_users()
+    if params.has_namenode or params.dfs_type == 'HCFS':
+      setup_hadoop_env()
+    setup_java()
+
+if __name__ == "__main__":
+  BeforeAnyHook().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
new file mode 100644
index 0000000..f70c8e9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
@@ -0,0 +1,231 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import collections
+import re
+import os
+
+import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
+
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.functions.expect import expect
+from ambari_commons.os_check import OSCheck
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+dfs_type = default("/commandParams/dfs_type", "")
+stack_root = Script.get_stack_root()
+
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+jdk_name = default("/hostLevelParams/jdk_name", None)
+java_home = config['hostLevelParams']['java_home']
+java_version = expect("/hostLevelParams/java_version", int)
+jdk_location = config['hostLevelParams']['jdk_location']
+
+sudo = AMBARI_SUDO_BINARY
+
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+upgrade_type = Script.get_upgrade_type(default("/commandParams/upgrade_type", ""))
+version = default("/commandParams/version", None)
+# Handle upgrade and downgrade
+if (upgrade_type is not None) and version:
+  stack_version_formatted = format_stack_version(version)
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+
+# Some datanode settings
+dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
+dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
+dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
+dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
+secure_dn_ports_are_in_use = False
+
+def get_port(address):
+  """
+  Extracts port from the address like 0.0.0.0:1019
+  """
+  if address is None:
+    return None
+  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
+  if m is not None:
+    return int(m.group(2))
+  else:
+    return None
+
+def is_secure_port(port):
+  """
+  Returns True if port is root-owned on *nix systems
+  """
+  if port is not None:
+    return port < 1024
+  else:
+    return False
+
+# hadoop default params
+mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
+
+# upgrades would cause these directories to have a version instead of "current"
+# which would cause a lot of problems when writing out hadoop-env.sh; instead
+# force the use of "current" in the hook
+hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
+hadoop_home = stack_select.get_hadoop_dir("home", force_latest_on_upgrade=True)
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec", force_latest_on_upgrade=True)
+
+hadoop_conf_empty_dir = None
+hadoop_secure_dn_user = hdfs_user
+hadoop_dir = "/etc/hadoop"
+versioned_stack_root = format('{stack_root}/current')
+hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
+datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
+is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])
+
+if not security_enabled:
+  hadoop_secure_dn_user = '""'
+else:
+  dfs_dn_port = get_port(dfs_dn_addr)
+  dfs_dn_http_port = get_port(dfs_dn_http_addr)
+  dfs_dn_https_port = get_port(dfs_dn_https_addr)
+  # Try to avoid situations where the datanode cannot start as a plain user because root-owned ports are in use
+  if dfs_http_policy == "HTTPS_ONLY":
+    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
+  elif dfs_http_policy == "HTTP_AND_HTTPS":
+    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
+  else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
+    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
+  if secure_dn_ports_are_in_use:
+    hadoop_secure_dn_user = hdfs_user
+  else:
+    hadoop_secure_dn_user = '""'
+
+#hadoop params
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
+
+#users and groups
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+tez_user = config['configurations']['tez-env']["tez_user"]
+oozie_user = config['configurations']['oozie-env']["oozie_user"]
+falcon_user = config['configurations']['falcon-env']["falcon_user"]
+ranger_user = config['configurations']['ranger-env']["ranger_user"]
+zeppelin_user = config['configurations']['zeppelin-env']["zeppelin_user"]
+zeppelin_group = config['configurations']['zeppelin-env']["zeppelin_group"]
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+zeppelin_master_hosts = default("/clusterHostInfo/zeppelin_master_hosts", [])
+
+has_namenode = not len(namenode_host) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_tez = 'tez-site' in config['configurations']
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_oozie_server = not len(oozie_servers) == 0
+has_falcon_server_hosts = not len(falcon_server_hosts) == 0
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+has_zeppelin_master = not len(zeppelin_master_hosts) == 0
+
+if has_namenode or dfs_type == 'HCFS':
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+
+hbase_tmp_dir = "/tmp/hbase-hbase"
+
+proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
+ranger_group = config['configurations']['ranger-env']['ranger_group']
+dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"]
+
+sysprep_skip_create_users_and_groups = default("/configurations/cluster-env/sysprep_skip_create_users_and_groups", False)
+ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
+fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
+
+smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
+if has_hbase_masters:
+  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
+#repo params
+repo_info = config['hostLevelParams']['repo_info']
+service_repo_info = default("/hostLevelParams/service_repo_info",None)
+
+user_to_groups_dict = collections.defaultdict(lambda:[user_group])
+user_to_groups_dict[smoke_user] = [proxyuser_group]
+if has_ganglia_server:
+  user_to_groups_dict[gmond_user] = [gmond_user]
+  user_to_groups_dict[gmetad_user] = [gmetad_user]
+if has_tez:
+  user_to_groups_dict[tez_user] = [proxyuser_group]
+if has_oozie_server:
+  user_to_groups_dict[oozie_user] = [proxyuser_group]
+if has_falcon_server_hosts:
+  user_to_groups_dict[falcon_user] = [proxyuser_group]
+if has_ranger_admin:
+  user_to_groups_dict[ranger_user] = [ranger_group]
+if has_zeppelin_master:
+  user_to_groups_dict[zeppelin_user] = [zeppelin_group, user_group]
+
+user_to_gid_dict = collections.defaultdict(lambda:user_group)
+
+user_list = json.loads(config['hostLevelParams']['user_list'])
+group_list = json.loads(config['hostLevelParams']['group_list'])
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+tez_am_view_acls = config['configurations']['tez-site']["tez.am.view-acls"]
+override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower()
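
Side note: get_port() and is_secure_port() above combine to decide whether the datanode is using root-owned ports. A quick illustration of the parsing behavior, restating the same regex defined in this params.py:

import re

def get_port(address):
    # Same pattern as above: optional http/https scheme, host, 1-5 digit port.
    if address is None:
        return None
    m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
    return int(m.group(2)) if m is not None else None

print(get_port("0.0.0.0:1019"))        # 1019 -> below 1024, so a root-owned port
print(get_port("https://dn1:50475"))   # 50475 -> unprivileged
print(get_port(None))                  # None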

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
new file mode 100644
index 0000000..320872e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
@@ -0,0 +1,226 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import getpass
+import tempfile
+from copy import copy
+from resource_management.libraries.functions.version import compare_versions
+from resource_management import *
+
+def setup_users():
+  """
+  Creates users before cluster installation
+  """
+  import params
+
+  should_create_users_and_groups = False
+  if params.host_sys_prepped:
+    should_create_users_and_groups = not params.sysprep_skip_create_users_and_groups
+  else:
+    should_create_users_and_groups = not params.ignore_groupsusers_create
+
+  if should_create_users_and_groups:
+    for group in params.group_list:
+      Group(group,
+      )
+
+    for user in params.user_list:
+      User(user,
+          gid = params.user_to_gid_dict[user],
+          groups = params.user_to_groups_dict[user],
+          fetch_nonlocal_groups = params.fetch_nonlocal_groups
+      )
+
+    if params.override_uid == "true":
+      set_uid(params.smoke_user, params.smoke_user_dirs)
+    else:
+      Logger.info('Skipping setting uid for smoke user as override_uid is disabled')
+  else:
+    Logger.info('Skipping creation of User and Group as host is sys prepped or ignore_groupsusers_create flag is on')
+    pass
+
+
+  if params.has_hbase_masters:
+    Directory (params.hbase_tmp_dir,
+               owner = params.hbase_user,
+               mode=0775,
+               create_parents = True,
+               cd_access="a",
+    )
+    if params.override_uid == "true":
+      set_uid(params.hbase_user, params.hbase_user_dirs)
+    else:
+      Logger.info('Skipping setting uid for hbase user as override_uid is disabled')
+
+  if should_create_users_and_groups:
+    if params.has_namenode:
+      create_dfs_cluster_admins()
+    if params.has_tez and params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.3') >= 0:
+      create_tez_am_view_acls()
+  else:
+    Logger.info('Skipping setting dfs cluster admin and tez view acls as host is sys prepped')
+
+def create_dfs_cluster_admins():
+  """
+  dfs.cluster.administrators supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
+  """
+  import params
+
+  groups_list = create_users_and_groups(params.dfs_cluster_administrators_group)
+
+  User(params.hdfs_user,
+    groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
+          fetch_nonlocal_groups = params.fetch_nonlocal_groups
+  )
+
+def create_tez_am_view_acls():
+
+  """
+  tez.am.view-acls supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
+  """
+  import params
+
+  if not params.tez_am_view_acls.startswith("*"):
+    create_users_and_groups(params.tez_am_view_acls)
+
+def create_users_and_groups(user_and_groups):
+
+  import params
+
+  parts = re.split('\s', user_and_groups)
+  if len(parts) == 1:
+    parts.append("")
+
+  users_list = parts[0].split(",") if parts[0] else []
+  groups_list = parts[1].split(",") if parts[1] else []
+
+  if users_list:
+    User(users_list,
+          fetch_nonlocal_groups = params.fetch_nonlocal_groups
+    )
+
+  if groups_list:
+    Group(copy(groups_list),
+    )
+  return groups_list
+    
+def set_uid(user, user_dirs):
+  """
+  user_dirs - comma separated directories
+  """
+  import params
+
+  File(format("{tmp_dir}/changeUid.sh"),
+       content=StaticFile("changeToSecureUid.sh"),
+       mode=0555)
+  ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
+  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs}"),
+          not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
+    
+def setup_hadoop_env():
+  import params
+  stackversion = params.stack_version_unformatted
+  Logger.info("FS Type: {0}".format(params.dfs_type))
+  if params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+
+    # create /etc/hadoop
+    Directory(params.hadoop_dir, mode=0755)
+
+    # HDP < 2.2 used a conf -> conf.empty symlink for /etc/hadoop/
+    if Script.is_stack_less_than("2.2"):
+      Directory(params.hadoop_conf_empty_dir, create_parents = True, owner="root",
+        group=params.user_group )
+
+      Link(params.hadoop_conf_dir, to=params.hadoop_conf_empty_dir,
+         not_if=format("ls {hadoop_conf_dir}"))
+
+    # write out hadoop-env.sh, but only if the directory exists
+    if os.path.exists(params.hadoop_conf_dir):
+      File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
+        group=params.user_group,
+        content=InlineTemplate(params.hadoop_env_sh_template))
+
+    # Create tmp dir for java.io.tmpdir
+    # Handle a situation when /tmp is set to noexec
+    Directory(params.hadoop_java_io_tmpdir,
+              owner=params.hdfs_user,
+              group=params.user_group,
+              mode=01777
+    )
+
+def setup_java():
+  """
+  Installs the JDK using params that come from ambari-server
+  """
+  import params
+
+  java_exec = format("{java_home}/bin/java")
+
+  if not os.path.isfile(java_exec):
+    if not params.jdk_name: # no jdk_name means a custom, pre-installed JDK is expected
+      raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host."))
+
+    jdk_curl_target = format("{tmp_dir}/{jdk_name}")
+    java_dir = os.path.dirname(params.java_home)
+
+    Directory(params.artifact_dir,
+              create_parents = True
+    )
+
+    File(jdk_curl_target,
+         content = DownloadSource(format("{jdk_location}/{jdk_name}")),
+         not_if = format("test -f {jdk_curl_target}")
+    )
+
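+    # Unpack the JDK into a private temp directory first, then copy it into place with sudo.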
+    tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
+
+    try:
+      if params.jdk_name.endswith(".bin"):
+        chmod_cmd = ("chmod", "+x", jdk_curl_target)
+        install_cmd = format("cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
+      elif params.jdk_name.endswith(".gz"):
+        chmod_cmd = ("chmod", "a+x", java_dir)
+        install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
+      else:
+        raise Fail(format("Unrecognized JDK archive extension in {jdk_name}; expected .bin or .gz"))
+
+      Directory(java_dir)
+
+      Execute(chmod_cmd,
+              sudo = True,
+      )
+
+      Execute(install_cmd)
+
+    finally:
+      Directory(tmp_java_dir, action="delete")
+
+    File(format("{java_home}/bin/java"),
+         mode=0755,
+         cd_access="a",
+         )
+    Execute(('chmod', '-R', '755', params.java_home),
+      sudo = True,
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/hook.py
new file mode 100644
index 0000000..ce17776
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/hook.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from shared_initialization import *
+from repo_initialization import *
+
+class BeforeInstallHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    self.run_custom_hook('before-ANY')
+    env.set_params(params)
+
+    install_repos()
+    install_packages()
+
+if __name__ == "__main__":
+  BeforeInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py
new file mode 100644
index 0000000..6193c11
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py
@@ -0,0 +1,113 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from resource_management.core.system import System
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import default, format
+from resource_management.libraries.functions.expect import expect
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
+agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+#users and groups
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+tez_user = config['configurations']['tez-env']["tez_user"]
+
+user_group = config['configurations']['cluster-env']['user_group']
+proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
+
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+
+# repo templates
+repo_rhel_suse =  config['configurations']['cluster-env']['repo_suse_rhel_template']
+repo_ubuntu =  config['configurations']['cluster-env']['repo_ubuntu_template']
+
+#hosts
+hostname = config["hostname"]
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
+falcon_host =  default('/clusterHostInfo/falcon_server_hosts', [])
+
+has_sqoop_client = 'sqoop-env' in config['configurations']
+has_namenode = len(namenode_host) > 0
+has_hs = len(hs_host) > 0
+has_resourcemanager = len(rm_host) > 0
+has_slaves = len(slave_hosts) > 0
+has_oozie_server = len(oozie_servers) > 0
+has_hcat_server_host = len(hcat_server_hosts) > 0
+has_hive_server_host = len(hive_server_host) > 0
+has_hbase_masters = len(hbase_master_hosts) > 0
+has_zk_host = len(zk_hosts) > 0
+has_ganglia_server = len(ganglia_server_hosts) > 0
+has_storm_server = len(storm_server_hosts) > 0
+has_falcon_server = len(falcon_host) > 0
+has_tez = 'tez-site' in config['configurations']
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+hbase_tmp_dir = "/tmp/hbase-hbase"
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#java params
+java_home = config['hostLevelParams']['java_home']
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
+jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
+jce_location = config['hostLevelParams']['jdk_location']
+jdk_location = config['hostLevelParams']['jdk_location']
+ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
+if has_hbase_masters:
+  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
+#repo params
+repo_info = config['hostLevelParams']['repo_info']
+service_repo_info = default("/hostLevelParams/service_repo_info",None)

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py
new file mode 100644
index 0000000..a35dce7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py
@@ -0,0 +1,68 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.resources.repository import Repository
+from resource_management.core.logger import Logger
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
+
+# components_list = repoName + postfix
+_UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]
+
+def _alter_repo(action, repo_string, repo_template):
+  """
+  @param action: "delete" or "create"
+  @param repo_string: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
+  """
+  repo_dicts = json.loads(repo_string)
+
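+  # A single repository may arrive as a bare dict rather than a list; normalize it.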
+  if not isinstance(repo_dicts, list):
+    repo_dicts = [repo_dicts]
+
+  if not repo_dicts:
+    Logger.info("Repository list is empty. Ambari may not be managing the repositories.")
+  else:
+    Logger.info("Initializing {0} repositories".format(str(len(repo_dicts))))
+
+  for repo in repo_dicts:
+    if 'baseUrl' not in repo:
+      repo['baseUrl'] = None
+    if 'mirrorsList' not in repo:
+      repo['mirrorsList'] = None
+
+    ubuntu_components = [ repo['repoName'] ] + _UBUNTU_REPO_COMPONENTS_POSTFIX
+
+    Repository(repo['repoId'],
+               action = action,
+               base_url = repo['baseUrl'],
+               mirror_list = repo['mirrorsList'],
+               repo_file_name = repo['repoName'],
+               repo_template = repo_template,
+               components = ubuntu_components, # ubuntu specific
+    )
+
+def install_repos():
+  import params
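+  # Sys-prepped hosts are expected to already have their repositories configured, so skip them.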
+  if params.host_sys_prepped:
+    return
+
+  template = params.repo_rhel_suse if OSCheck.is_suse_family() or OSCheck.is_redhat_family() else params.repo_ubuntu
+  _alter_repo("create", params.repo_info, template)
+  if params.service_repo_info:
+    _alter_repo("create", params.service_repo_info, template)

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/shared_initialization.py
new file mode 100644
index 0000000..1609050
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/shared_initialization.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.functions import stack_tools
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.core.resources.packaging import Package
+
+def install_packages():
+  import params
+  if params.host_sys_prepped:
+    return
+
+  packages = ['unzip', 'curl']
+  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
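+    # HDP 2.2+ also needs the stack selector tool (e.g. the hdp-select package) installed up front.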
+    stack_selector_package = stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME)
+    packages.append(stack_selector_package)
+  Package(packages,
+          retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+          retry_count=params.agent_stack_retry_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py
new file mode 100644
index 0000000..14b9d99
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+class BeforeRestartHook(Hook):
+
+  def hook(self, env):
+    self.run_custom_hook('before-START')
+
+if __name__ == "__main__":
+  BeforeRestartHook().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh
new file mode 100644
index 0000000..68aa96d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
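+# Usage (illustrative):
+#   checkForFormat.sh <hdfs_user> <conf_dir> <bin_dir> <mark_dir> <name_dir>[,<name_dir>...]
+#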
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export bin_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  /var/lib/ambari-agent/ambari-sudo.sh rm -f ${mark_file}
+  /var/lib/ambari-agent/ambari-sudo.sh mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    /var/lib/ambari-agent/ambari-sudo.sh su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:$bin_dir ; yes Y | hdfs --config ${conf_dir} ${command}"
+    (( EXIT_CODE = $EXIT_CODE | $? ))
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/fast-hdfs-resource.jar
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/fast-hdfs-resource.jar b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/fast-hdfs-resource.jar
new file mode 100644
index 0000000..c90890b
Binary files /dev/null and b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/fast-hdfs-resource.jar differ

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties
new file mode 100644
index 0000000..7e12962
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties
@@ -0,0 +1,134 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+#
+# Job Summary Appender 
+#
+# Use following logger to send summary to separate file defined by 
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py
new file mode 100644
index 0000000..0f7a55c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import sys, os
+from string import join
+import ConfigParser
+
+
+DEFAULT_RACK = "/default-rack"
+DATA_FILE_NAME =  os.path.dirname(os.path.abspath(__file__)) + "/topology_mappings.data"
+SECTION_NAME = "network_topology"
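+
+# topology_mappings.data is an INI-style file expected to look roughly like:
+#   [network_topology]
+#   host1.example.com=/rack-01
+#   192.168.1.10=/rack-01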
+
+class TopologyScript():
+
+  def load_rack_map(self):
+    try:
+      #RACK_MAP contains both host name vs rack and ip vs rack mappings
+      mappings = ConfigParser.ConfigParser()
+      mappings.read(DATA_FILE_NAME)
+      return dict(mappings.items(SECTION_NAME))
+    except ConfigParser.NoSectionError:
+      return {}
+
+  def get_racks(self, rack_map, args):
+    if len(args) == 1:
+      return DEFAULT_RACK
+    else:
+      return join([self.lookup_by_hostname_or_ip(input_argument, rack_map) for input_argument in args[1:]])
+
+  def lookup_by_hostname_or_ip(self, hostname_or_ip, rack_map):
+    #try looking up by hostname
+    rack = rack_map.get(hostname_or_ip)
+    if rack is not None:
+      return rack
+    #try looking up by ip
+    rack = rack_map.get(self.extract_ip(hostname_or_ip))
+    #try by localhost since hadoop could be passing in 127.0.0.1 which might not be mapped
+    return rack if rack is not None else rack_map.get("localhost.localdomain", DEFAULT_RACK)
+
+  #strips out port and slashes in case hadoop passes in something like 127.0.0.1/127.0.0.1:50010
+  def extract_ip(self, container_string):
+    return container_string.split("/")[0].split(":")[0]
+
+  def execute(self, args):
+    rack_map = self.load_rack_map()
+    rack = self.get_racks(rack_map, args)
+    print rack
+
+if __name__ == "__main__":
+  TopologyScript().execute(sys.argv)

http://git-wip-us.apache.org/repos/asf/ambari/blob/757267fc/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py
new file mode 100644
index 0000000..f21e4b1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py
@@ -0,0 +1,39 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from rack_awareness import create_topology_script_and_mapping
+from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink
+
+class BeforeStartHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    self.run_custom_hook('before-ANY')
+    env.set_params(params)
+
+    setup_hadoop()
+    setup_configs()
+    create_javahome_symlink()
+    create_topology_script_and_mapping()
+
+if __name__ == "__main__":
+  BeforeStartHook().execute()