Posted to commits@ambari.apache.org by jo...@apache.org on 2016/12/05 20:23:10 UTC

[01/14] ambari git commit: Revert "AMBARI-19055 - Removing Tasks From host_role_command Causes Upgrades To Show As PENDING (jonathanhurley)"

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-18456 3bbe75c3b -> 803f44ba5


Revert "AMBARI-19055 - Removing Tasks From host_role_command Causes Upgrades To Show As PENDING (jonathanhurley)"

This reverts commit 32840c1ed434ae4cc99e3ab6f7d3f2604fb69d06.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5ec73bce
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5ec73bce
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5ec73bce

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 5ec73bce006a659f33c09c57cb50f28037eca5d7
Parents: 3096c79
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Sat Dec 3 08:22:31 2016 -0500
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Sat Dec 3 08:22:31 2016 -0500

----------------------------------------------------------------------
 .../controller/internal/CalculatedStatus.java   | 52 +++++---------------
 .../internal/RequestResourceProvider.java       | 34 +++++--------
 .../internal/StageResourceProvider.java         |  2 +-
 .../internal/CalculatedStatusTest.java          | 31 ------------
 .../internal/RequestResourceProviderTest.java   | 12 ++---
 5 files changed, 28 insertions(+), 103 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec73bce/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CalculatedStatus.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CalculatedStatus.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CalculatedStatus.java
index 3c415df..3a86aef 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CalculatedStatus.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CalculatedStatus.java
@@ -53,17 +53,6 @@ public class CalculatedStatus {
    */
   private final double percent;
 
-  /**
-   * A status which represents a COMPLETED state at 0%
-   */
-  public static final CalculatedStatus COMPLETED = new CalculatedStatus(HostRoleStatus.COMPLETED,
-      HostRoleStatus.COMPLETED, 100.0);
-
-  /**
-   * A status which represents a PENDING state at 0%
-   */
-  public static final CalculatedStatus PENDING = new CalculatedStatus(HostRoleStatus.PENDING,
-      HostRoleStatus.PENDING, 0.0);
 
   // ----- Constructors ------------------------------------------------------
 
@@ -90,6 +79,12 @@ public class CalculatedStatus {
     this.percent = percent;
   }
 
+  /**
+   * Static factory method to get Status that represents a Completed state
+   */
+  public static CalculatedStatus getCompletedStatus() {
+    return new CalculatedStatus(HostRoleStatus.COMPLETED, HostRoleStatus.COMPLETED, 100.0);
+  }
 
   // ----- CalculatedStatus --------------------------------------------------
 
@@ -296,25 +291,14 @@ public class CalculatedStatus {
   }
 
   /**
-   * Calculates the overall status of an upgrade. If there are no tasks, then a
-   * status of {@link HostRoleStatus#COMPLETED} is returned.
-   *
-   * @param stageDto
-   *          the map of stage-to-summary value objects
-   * @param stageIds
-   *          the stage ids to consider from the value objects
+   * Calculates the overall status of an upgrade.
+   * @param stageDto  the map of stage-to-summary value objects
+   * @param stageIds  the stage ids to consider from the value objects
    * @return the calculated status
    */
   public static CalculatedStatus statusFromStageSummary(Map<Long, HostRoleCommandStatusSummaryDTO> stageDto,
       Set<Long> stageIds) {
 
-    // if either are empty, then we have no tasks and therefore no status - we
-    // should return COMPLETED. This can happen if someone removes all tasks but
-    // leaves the stages and request
-    if (stageDto.isEmpty() || stageIds.isEmpty()) {
-      return COMPLETED;
-    }
-
     Collection<HostRoleStatus> stageStatuses = new HashSet<>();
     Collection<HostRoleStatus> stageDisplayStatuses = new HashSet<>();
     Collection<HostRoleStatus> taskStatuses = new ArrayList<>();
@@ -394,28 +378,19 @@ public class CalculatedStatus {
    */
   public static HostRoleStatus calculateSummaryStatusOfStage(Map<HostRoleStatus, Integer> counters,
       int total, boolean skippable) {
-
-    // when there are 0 tasks, return COMPLETED
-    if (total == 0) {
-      return HostRoleStatus.COMPLETED;
-    }
-
     if (counters.get(HostRoleStatus.PENDING) == total) {
       return HostRoleStatus.PENDING;
     }
-
     // By definition, any tasks in a future stage must be held in a PENDING status.
     if (counters.get(HostRoleStatus.HOLDING) > 0 || counters.get(HostRoleStatus.HOLDING_FAILED) > 0 || counters.get(HostRoleStatus.HOLDING_TIMEDOUT) > 0) {
       return counters.get(HostRoleStatus.HOLDING) > 0 ? HostRoleStatus.HOLDING :
       counters.get(HostRoleStatus.HOLDING_FAILED) > 0 ? HostRoleStatus.HOLDING_FAILED :
       HostRoleStatus.HOLDING_TIMEDOUT;
     }
-
     // Because tasks are not skippable, guaranteed to be FAILED
     if (counters.get(HostRoleStatus.FAILED) > 0 && !skippable) {
       return HostRoleStatus.FAILED;
     }
-
     // Because tasks are not skippable, guaranteed to be TIMEDOUT
     if (counters.get(HostRoleStatus.TIMEDOUT) > 0  && !skippable) {
       return HostRoleStatus.TIMEDOUT;
@@ -426,11 +401,9 @@ public class CalculatedStatus {
     if (counters.get(HostRoleStatus.ABORTED) > 0 && numActiveTasks == 0) {
       return HostRoleStatus.ABORTED;
     }
-
     if (counters.get(HostRoleStatus.COMPLETED) == total) {
       return HostRoleStatus.COMPLETED;
     }
-
     return HostRoleStatus.IN_PROGRESS;
   }
 
@@ -442,8 +415,7 @@ public class CalculatedStatus {
    *
    * @return summary request status based on statuses of tasks in different states.
    */
-  protected static HostRoleStatus calculateSummaryStatusOfUpgrade(
-      Map<HostRoleStatus, Integer> counters, int total) {
+  private static HostRoleStatus calculateSummaryStatusOfUpgrade(Map<HostRoleStatus, Integer> counters, int total) {
     return calculateSummaryStatusOfStage(counters, total, false);
   }
 
@@ -456,8 +428,8 @@ public class CalculatedStatus {
    *
    * @return summary request status based on statuses of tasks in different states.
    */
-  protected static HostRoleStatus calculateSummaryDisplayStatus(
-      Map<HostRoleStatus, Integer> counters, int total, boolean skippable) {
+  private static HostRoleStatus calculateSummaryDisplayStatus(Map<HostRoleStatus, Integer> counters,
+                                                              int total, boolean skippable) {
     return counters.get(HostRoleStatus.SKIPPED_FAILED) > 0 ? HostRoleStatus.SKIPPED_FAILED :
            counters.get(HostRoleStatus.FAILED) > 0 ? HostRoleStatus.FAILED:
            calculateSummaryStatusOfStage(counters, total, skippable);
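
The revert restores the precedence ladder above without the total == 0 guard. A minimal, self-contained sketch of the restored ordering (the trimmed status enum and counter map are illustrative, and the holding/aborted variants are elided; only the ordering mirrors the diff):

    import java.util.EnumMap;
    import java.util.Map;

    public class StatusLadderSketch {

      enum Status { PENDING, HOLDING, FAILED, TIMEDOUT, ABORTED, COMPLETED, IN_PROGRESS }

      // Mirrors the restored ordering: all-PENDING first, then holding states,
      // then non-skippable FAILED/TIMEDOUT, then COMPLETED, else IN_PROGRESS.
      static Status summarize(Map<Status, Integer> counters, int total, boolean skippable) {
        if (counters.getOrDefault(Status.PENDING, 0) == total) {
          return Status.PENDING;
        }
        if (counters.getOrDefault(Status.HOLDING, 0) > 0) {
          return Status.HOLDING;
        }
        if (counters.getOrDefault(Status.FAILED, 0) > 0 && !skippable) {
          return Status.FAILED;
        }
        if (counters.getOrDefault(Status.TIMEDOUT, 0) > 0 && !skippable) {
          return Status.TIMEDOUT;
        }
        if (counters.getOrDefault(Status.COMPLETED, 0) == total) {
          return Status.COMPLETED;
        }
        return Status.IN_PROGRESS;
      }

      public static void main(String[] args) {
        Map<Status, Integer> counters = new EnumMap<>(Status.class);
        // With zero tasks, the first check holds trivially (0 == 0).
        System.out.println(summarize(counters, 0, false)); // PENDING
      }
    }

Note the consequence the reverted guard had addressed: with zero tasks, counters.get(PENDING) == total holds trivially, so an empty stage summarizes to PENDING rather than COMPLETED.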

http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec73bce/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
index 0690ee7..8c1bc57 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
@@ -69,10 +69,10 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.topology.LogicalRequest;
 import org.apache.ambari.server.topology.TopologyManager;
-import org.apache.commons.lang.StringUtils;
 
 import com.google.common.collect.Sets;
 import com.google.inject.Inject;
+import org.apache.commons.lang.StringUtils;
 
 /**
  * Resource provider for request resources.
@@ -566,18 +566,13 @@ public class RequestResourceProvider extends AbstractControllerResourceProvider
 
     @Override
     public boolean equals(Object o) {
-      if (this == o) {
-        return true;
-      }
-      if (o == null || getClass() != o.getClass()) {
-        return false;
-      }
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
 
       ServiceComponentTuple that = (ServiceComponentTuple) o;
 
-      if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) {
+      if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null)
         return false;
-      }
       return !(componentName != null ? !componentName.equals(that.componentName) : that.componentName != null);
 
     }
@@ -744,20 +739,15 @@ public class RequestResourceProvider extends AbstractControllerResourceProvider
     // get summaries from TopologyManager for logical requests
     summary.putAll(topologyManager.getStageSummaries(entity.getRequestId()));
 
-    // summary might be empty due to delete host have cleared all
-    // HostRoleCommands or due to hosts haven't registered yet with the cluster
-    // when the cluster is provisioned with a Blueprint
-    final CalculatedStatus status;
     LogicalRequest logicalRequest = topologyManager.getRequest(entity.getRequestId());
-    if (summary.isEmpty() && null != logicalRequest) {
-      // in this case, it appears that there are no tasks but this is a logical
-      // topology request, so it's a matter of hosts simply not registering yet
-      // for tasks to be created
-      status = CalculatedStatus.PENDING;
-    } else {
-      // there are either tasks or this is not a logical request, so do normal
-      // status calculations
-      status = CalculatedStatus.statusFromStageSummary(summary, summary.keySet());
+
+    CalculatedStatus status = CalculatedStatus.statusFromStageSummary(summary, summary.keySet());
+    if (summary.isEmpty() && logicalRequest == null) {
+
+      // summary might be empty due to delete host have cleared all HostRoleCommands
+      // or due to hosts haven't registered yet with the cluster when the cluster is provisioned
+      // with a Blueprint
+      status = CalculatedStatus.getCompletedStatus();
     }
 
     setResourceProperty(resource, REQUEST_STATUS_PROPERTY_ID, status.getStatus().toString(), requestedPropertyIds);
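
Post-revert, the request status is derived from the stage summaries first and only forced to COMPLETED when the summary is empty and no logical (topology) request exists. A condensed model of the branch, with "FROM_SUMMARY" standing in for CalculatedStatus.statusFromStageSummary(summary, summary.keySet()):

    class RequestStatusSketch {
      static String resolve(boolean summaryEmpty, boolean hasLogicalRequest) {
        if (summaryEmpty && !hasLogicalRequest) {
          // HostRoleCommands were cleared (e.g., by a host delete) and this is
          // not a Blueprint topology request: report the request as finished.
          return "COMPLETED";
        }
        return "FROM_SUMMARY";
      }
    }

For an empty summary with a live logical request, statusFromStageSummary now runs on empty collections, which, given the zero-count behavior sketched above, is expected to report PENDING while hosts are still registering.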

http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec73bce/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
index a778882..59dd9d9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
@@ -339,7 +339,7 @@ public class StageResourceProvider extends AbstractControllerResourceProvider im
     CalculatedStatus status;
     if (summary.isEmpty()) {
       // Delete host might have cleared all HostRoleCommands
-      status = CalculatedStatus.COMPLETED;
+      status = CalculatedStatus.getCompletedStatus();
     } else {
       status = CalculatedStatus.statusFromStageSummary(summary, Collections.singleton(entity.getStageId()));
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec73bce/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java
index a96f395..6f592cd 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java
@@ -657,37 +657,6 @@ public class CalculatedStatusTest {
     assertEquals(HostRoleStatus.IN_PROGRESS, calc.getStatus());
   }
 
-  /**
-   * Tests that when there are no tasks and all counts are 0, that the returned
-   * status is {@link HostRoleStatus#COMPLETED}.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testGetCompletedStatusForNoTasks() throws Exception {
-    // no status / no tasks
-    CalculatedStatus status = CalculatedStatus.statusFromTaskEntities(
-        new ArrayList<HostRoleCommandEntity>(), false);
-
-    assertEquals(HostRoleStatus.COMPLETED, status.getStatus());
-
-    // empty summaries
-    status = CalculatedStatus.statusFromStageSummary(
-        new HashMap<Long, HostRoleCommandStatusSummaryDTO>(), new HashSet<Long>());
-
-    assertEquals(HostRoleStatus.COMPLETED, status.getStatus());
-
-    // generate a map of 0's - COMPLETED=0, IN_PROGRESS=0, etc
-    Map<HostRoleStatus, Integer> counts = CalculatedStatus.calculateStatusCounts(new ArrayList<HostRoleStatus>());
-    Map<HostRoleStatus, Integer> displayCounts = CalculatedStatus.calculateStatusCounts(new ArrayList<HostRoleStatus>());
-
-    HostRoleStatus hostRoleStatus = CalculatedStatus.calculateSummaryStatusOfUpgrade(counts, 0);
-    HostRoleStatus hostRoleDisplayStatus = CalculatedStatus.calculateSummaryDisplayStatus(displayCounts, 0, false);
-
-    assertEquals(HostRoleStatus.COMPLETED, hostRoleStatus);
-    assertEquals(HostRoleStatus.COMPLETED, hostRoleDisplayStatus);
-  }
-
   private Collection<HostRoleCommandEntity> getTaskEntities(HostRoleStatus... statuses) {
     Collection<HostRoleCommandEntity> entities = new LinkedList<HostRoleCommandEntity>();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/5ec73bce/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
index f7dff11..5dfc74d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
@@ -20,10 +20,11 @@ package org.apache.ambari.server.controller.internal;
 
 
 import static org.apache.ambari.server.controller.internal.HostComponentResourceProvider.HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID;
+import org.apache.ambari.server.topology.Blueprint;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.newCapture;
 import static org.powermock.api.easymock.PowerMock.createMock;
 import static org.powermock.api.easymock.PowerMock.createNiceMock;
@@ -76,7 +77,6 @@ import org.apache.ambari.server.security.authorization.AuthorizationHelperInitia
 import org.apache.ambari.server.security.authorization.RoleAuthorization;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.topology.Blueprint;
 import org.apache.ambari.server.topology.ClusterTopology;
 import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.LogicalRequest;
@@ -1200,7 +1200,7 @@ public class RequestResourceProviderTest {
       }
     }
     Assert.assertNotNull(propertyIdToAssert);
-    Assert.assertEquals("true", propertyValueToAssert);
+    Assert.assertEquals("true", (String) propertyValueToAssert);
   }
 
   @Test
@@ -1624,12 +1624,6 @@ public class RequestResourceProviderTest {
     verify(managementController, actionManager, clusters, requestMock, requestDAO, hrcDAO);
   }
 
-  /**
-   * Tests that topology requests return different status (PENDING) if there are
-   * no tasks. Normal requests should return COMPLETED.
-   *
-   * @throws Exception
-   */
   @Test
   @PrepareForTest(AmbariServer.class)
   public void testGetLogicalRequestStatusWithNoTasks() throws Exception {


[12/14] ambari git commit: AMBARI-19084. Custom property added to the Notification settings appears twice (onechiporenko)

Posted by jo...@apache.org.
AMBARI-19084. Custom property added to the Notification settings appears twice (onechiporenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b2d2ef16
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b2d2ef16
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b2d2ef16

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: b2d2ef1689d5434c20d6a791ace598f58742605a
Parents: f5051dc
Author: Oleg Nechiporenko <on...@apache.org>
Authored: Mon Dec 5 17:08:41 2016 +0200
Committer: Oleg Nechiporenko <on...@apache.org>
Committed: Mon Dec 5 18:05:42 2016 +0200

----------------------------------------------------------------------
 .../common/configs/notifications_configs.hbs      |  2 +-
 .../notification_configs_view.js                  | 18 ++++++++++++++----
 .../configs/service_configs_by_category_view.js   |  9 ++++++++-
 3 files changed, 23 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b2d2ef16/ambari-web/app/templates/common/configs/notifications_configs.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/common/configs/notifications_configs.hbs b/ambari-web/app/templates/common/configs/notifications_configs.hbs
index ca2c208..2dd71aa 100644
--- a/ambari-web/app/templates/common/configs/notifications_configs.hbs
+++ b/ambari-web/app/templates/common/configs/notifications_configs.hbs
@@ -20,7 +20,7 @@
   <div class="panel-heading" {{action "onToggleBlock" category target="view"}}>
     <h3 class="panel-title">
       <i {{bindAttr class=":pull-left :panel-toggle view.category.isCollapsed:icon-caret-right:icon-caret-down"}}></i>
-      <a class="panel-toggle">{{view.category.displayName}}</a>
+      <a class="panel-toggle category-header"><span class="category-name">{{view.category.displayName}}</span></a>
     </h3>
   </div>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2d2ef16/ambari-web/app/views/common/configs/custom_category_views/notification_configs_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/custom_category_views/notification_configs_view.js b/ambari-web/app/views/common/configs/custom_category_views/notification_configs_view.js
index 9ad8762..d299b5f 100644
--- a/ambari-web/app/views/common/configs/custom_category_views/notification_configs_view.js
+++ b/ambari-web/app/views/common/configs/custom_category_views/notification_configs_view.js
@@ -62,8 +62,8 @@ App.NotificationsConfigsView = App.ServiceConfigsByCategoryView.extend({
     if (!this.get('categoryConfigsAll.length')) return;
     this.set('createNotification', this.get('categoryConfigsAll').findProperty('name', 'create_notification').get('value'));
     this.set('tlsOrSsl', this.get('categoryConfigsAll').findProperty('name', 'mail.smtp.starttls.enable').get('value') ? 'tls' : 'ssl');
-    var smtp_use_auth = this.get('categoryConfigsAll').findProperty('name', 'smtp_use_auth');
-    smtp_use_auth.set('value', Boolean(smtp_use_auth.get('value') === 'true'));
+    var smtpUseAuth = this.get('categoryConfigsAll').findProperty('name', 'smtp_use_auth');
+    smtpUseAuth.set('value', Boolean(smtpUseAuth.get('value') === 'true'));
     this.updateCategoryConfigs();
   },
 
@@ -74,8 +74,8 @@ App.NotificationsConfigsView = App.ServiceConfigsByCategoryView.extend({
    */
   onTlsOrSslChanged: function () {
     var tlsOrSsl = this.get('tlsOrSsl');
-    this.get('categoryConfigsAll').findProperty('name', 'mail.smtp.starttls.enable').set('value', tlsOrSsl == 'tls');
-    this.get('categoryConfigsAll').findProperty('name', 'mail.smtp.startssl.enable').set('value', tlsOrSsl == 'ssl');
+    this.get('categoryConfigsAll').findProperty('name', 'mail.smtp.starttls.enable').set('value', tlsOrSsl === 'tls');
+    this.get('categoryConfigsAll').findProperty('name', 'mail.smtp.startssl.enable').set('value', tlsOrSsl === 'ssl');
   }.observes('tlsOrSsl'),
 
   /**
@@ -122,6 +122,16 @@ App.NotificationsConfigsView = App.ServiceConfigsByCategoryView.extend({
   updateConfig: function (config, flag) {
     config.set('isRequired', flag);
     config.set('isEditable', flag);
+  },
+
+  /**
+   * No sense to store config to <code>serviceConfigs</code> and <code>categoryConfigsAll</code> because
+   * <code>categoryConfigsAll</code> is a subset of <code>serviceConfigs</code>
+   *
+   * @override
+   */
+  _appendConfigToCollection: function (serviceConfigProperty) {
+    this.get('serviceConfigs').pushObject(serviceConfigProperty);
   }
 
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2d2ef16/ambari-web/app/views/common/configs/service_configs_by_category_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/configs/service_configs_by_category_view.js b/ambari-web/app/views/common/configs/service_configs_by_category_view.js
index dfcd5df..16258cf 100644
--- a/ambari-web/app/views/common/configs/service_configs_by_category_view.js
+++ b/ambari-web/app/views/common/configs/service_configs_by_category_view.js
@@ -439,7 +439,14 @@ App.ServiceConfigsByCategoryView = Em.View.extend(App.UserPref, App.ConfigOverri
         isNotSaved: true
       }, selectedConfigGroup);
     }
-    var serviceConfigProperty = App.ServiceConfigProperty.create(config);
+    this._appendConfigToCollection(App.ServiceConfigProperty.create(config));
+  },
+
+  /**
+   * @param {App.ServiceConfigProperty} serviceConfigProperty
+   * @private
+   */
+  _appendConfigToCollection: function (serviceConfigProperty) {
     this.get('serviceConfigs').pushObject(serviceConfigProperty);
     this.get('categoryConfigsAll').pushObject(serviceConfigProperty);
   },
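
The fix routes the append through an overridable hook, _appendConfigToCollection, so the notifications view can skip the second add: its categoryConfigsAll is a subset of serviceConfigs, so pushing the new property into both collections surfaced it twice. The same template-method shape, rendered in Java for illustration (names are illustrative, not Ambari's API):

    import java.util.ArrayList;
    import java.util.List;

    class ServiceConfigsByCategoryViewSketch {
      final List<String> serviceConfigs = new ArrayList<>();
      final List<String> categoryConfigsAll = new ArrayList<>();

      // Default hook: keep both collections in sync.
      void appendConfigToCollection(String property) {
        serviceConfigs.add(property);
        categoryConfigsAll.add(property);
      }
    }

    class NotificationsConfigsViewSketch extends ServiceConfigsByCategoryViewSketch {
      // categoryConfigsAll already mirrors serviceConfigs here, so only one
      // collection is updated to avoid showing the custom property twice.
      @Override
      void appendConfigToCollection(String property) {
        serviceConfigs.add(property);
      }
    }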


[14/14] ambari git commit: Merge branch 'trunk' into branch-feature-AMBARI-18456

Posted by jo...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-18456


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/803f44ba
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/803f44ba
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/803f44ba

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 803f44ba5f7411d9421b44dbacaa48f81ce3edc6
Parents: 3bbe75c 8fbc271
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Mon Dec 5 15:21:25 2016 -0500
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Mon Dec 5 15:21:25 2016 -0500

----------------------------------------------------------------------
 .../libraries/functions/constants.py            |   1 +
 .../libraries/functions/setup_atlas_hook.py     |  47 +-
 .../BlueprintConfigurationProcessor.java        |   3 +-
 .../ambari/server/topology/TopologyManager.java |  19 +-
 .../server/upgrade/UpgradeCatalog250.java       |  30 +
 .../src/main/python/ambari_server_main.py       |   8 +-
 .../0.5.0.2.1/configuration/falcon-env.xml      |  17 +
 .../FALCON/0.5.0.2.1/metainfo.xml               |   7 +
 .../FALCON/0.5.0.2.1/package/scripts/falcon.py  |   2 +-
 .../0.5.0.2.1/package/scripts/params_linux.py   |   5 +-
 .../FALCON/0.5.0.2.1/quicklinks/quicklinks.json |  35 +
 .../HIVE/0.12.0.2.0/configuration/hive-env.xml  |  17 +
 .../HIVE/0.12.0.2.0/package/scripts/hcat.py     |   2 +-
 .../HIVE/0.12.0.2.0/package/scripts/hive.py     |   2 +-
 .../0.12.0.2.0/package/scripts/params_linux.py  |   5 +-
 .../HIVE/0.12.0.2.0/package/scripts/webhcat.py  |   2 +-
 .../RANGER/0.4.0/package/scripts/params.py      |   1 +
 .../0.4.0/package/scripts/ranger_admin.py       |  36 +-
 .../SQOOP/1.4.4.2.0/configuration/sqoop-env.xml |  17 +
 .../1.4.4.2.0/configuration/sqoop-site.xml      |   6 +
 .../1.4.4.2.0/package/scripts/params_linux.py   |   5 +-
 .../SQOOP/1.4.4.2.0/package/scripts/sqoop.py    |   2 +-
 .../STORM/0.10.0/configuration/storm-env.xml    |  17 +
 .../STORM/0.9.1/package/scripts/params_linux.py |   6 +-
 .../STORM/0.9.1/package/scripts/storm.py        |   2 +-
 .../scripts/post-user-creation-hook.sh          |   7 +
 .../HDP/2.0.6/properties/stack_features.json    |   5 +
 .../services/HIVE/configuration/hive-site.xml   |   4 +
 .../services/STORM/configuration/storm-site.xml |   4 +
 .../stacks/HDP/2.3/services/stack_advisor.py    |  57 +-
 .../stacks/HDP/2.5/services/stack_advisor.py    |   6 -
 .../BlueprintConfigurationProcessorTest.java    |  11 +
 .../server/upgrade/UpgradeCatalog250Test.java   |  93 +++
 .../stacks/2.3/common/test_stack_advisor.py     |  28 +-
 .../stacks/2.6/RANGER/test_ranger_admin.py      | 504 ++++++++++++
 .../2.6/configs/ranger-admin-default.json       | 704 +++++++++++++++++
 .../2.6/configs/ranger-admin-secured.json       | 773 +++++++++++++++++++
 ambari-web/app/styles/wizard.less               |   6 +-
 .../common/configs/notifications_configs.hbs    |   2 +-
 ambari-web/app/templates/wizard/step1.hbs       |   2 +-
 .../notification_configs_view.js                |  18 +-
 .../configs/service_configs_by_category_view.js |   9 +-
 ambari-web/app/views/wizard/step1_view.js       |  21 +-
 .../widgets/slider_config_widget_view_test.js   |  23 -
 .../test/views/main/dashboard/widgets_test.js   |   2 +-
 contrib/utils/perf/deploy-gce-perf-cluster.py   |  10 +-
 .../src/main/resources/view.xml                 |  23 +-
 contrib/views/files/src/main/resources/view.xml |   7 +
 .../views/hive-next/src/main/resources/view.xml |   7 +
 contrib/views/tez/src/main/resources/view.xml   |   7 +
 50 files changed, 2502 insertions(+), 125 deletions(-)
----------------------------------------------------------------------



[03/14] ambari git commit: AMBARI-19007 Atlas to support configuration of hooks from separate cluster (mugdha)

Posted by jo...@apache.org.
AMBARI-19007 Atlas to support configuration of hooks from separate cluster (mugdha)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8ad494e9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8ad494e9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8ad494e9

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 8ad494e97302d5da69224c7ce9e493d5f20797c1
Parents: 12a3cd8
Author: Mugdha Varadkar <mu...@apache.org>
Authored: Fri Dec 2 18:15:39 2016 +0530
Committer: Mugdha Varadkar <mu...@apache.org>
Committed: Mon Dec 5 10:13:09 2016 +0530

----------------------------------------------------------------------
 .../libraries/functions/setup_atlas_hook.py     | 47 +++++-----
 .../BlueprintConfigurationProcessor.java        |  3 +-
 .../server/upgrade/UpgradeCatalog250.java       | 30 +++++++
 .../0.5.0.2.1/configuration/falcon-env.xml      | 17 ++++
 .../FALCON/0.5.0.2.1/package/scripts/falcon.py  |  2 +-
 .../0.5.0.2.1/package/scripts/params_linux.py   |  5 +-
 .../HIVE/0.12.0.2.0/configuration/hive-env.xml  | 17 ++++
 .../HIVE/0.12.0.2.0/package/scripts/hcat.py     |  2 +-
 .../HIVE/0.12.0.2.0/package/scripts/hive.py     |  2 +-
 .../0.12.0.2.0/package/scripts/params_linux.py  |  5 +-
 .../HIVE/0.12.0.2.0/package/scripts/webhcat.py  |  2 +-
 .../SQOOP/1.4.4.2.0/configuration/sqoop-env.xml | 17 ++++
 .../1.4.4.2.0/configuration/sqoop-site.xml      |  6 ++
 .../1.4.4.2.0/package/scripts/params_linux.py   |  5 +-
 .../SQOOP/1.4.4.2.0/package/scripts/sqoop.py    |  2 +-
 .../STORM/0.10.0/configuration/storm-env.xml    | 17 ++++
 .../STORM/0.9.1/package/scripts/params_linux.py |  6 +-
 .../STORM/0.9.1/package/scripts/storm.py        |  2 +-
 .../services/HIVE/configuration/hive-site.xml   |  4 +
 .../services/STORM/configuration/storm-site.xml |  4 +
 .../stacks/HDP/2.3/services/stack_advisor.py    | 57 +++++++++++-
 .../stacks/HDP/2.5/services/stack_advisor.py    |  6 --
 .../BlueprintConfigurationProcessorTest.java    | 11 +++
 .../server/upgrade/UpgradeCatalog250Test.java   | 93 ++++++++++++++++++++
 .../stacks/2.3/common/test_stack_advisor.py     | 28 +++++-
 25 files changed, 339 insertions(+), 51 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-common/src/main/python/resource_management/libraries/functions/setup_atlas_hook.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/setup_atlas_hook.py b/ambari-common/src/main/python/resource_management/libraries/functions/setup_atlas_hook.py
index a1d2f95..eeae4bc 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/setup_atlas_hook.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/setup_atlas_hook.py
@@ -117,6 +117,8 @@ def setup_atlas_hook(service_name, service_props, atlas_hook_filepath, owner, gr
   """
   import params
   atlas_props = default('/configurations/application-properties', {})
+  merged_props = {}
+  merged_props.update(service_props)
 
   if has_atlas_in_cluster():
     # Take the subset
@@ -131,12 +133,12 @@ def setup_atlas_hook(service_name, service_props, atlas_hook_filepath, owner, gr
 
     merged_props.update(service_props)
 
-    Logger.info(format("Generating Atlas Hook config file {atlas_hook_filepath}"))
-    PropertiesFile(atlas_hook_filepath,
-                   properties = merged_props,
-                   owner = owner,
-                   group = group,
-                   mode = 0644)
+  Logger.info(format("Generating Atlas Hook config file {atlas_hook_filepath}"))
+  PropertiesFile(atlas_hook_filepath,
+           properties = merged_props,
+           owner = owner,
+           group = group,
+           mode = 0644)
 
 
 def setup_atlas_jar_symlinks(hook_name, jar_source_dir):
@@ -157,22 +159,23 @@ def setup_atlas_jar_symlinks(hook_name, jar_source_dir):
   """
   import params
 
-  if has_atlas_in_cluster():
-    atlas_home_dir = os.environ['METADATA_HOME_DIR'] if 'METADATA_HOME_DIR' in os.environ \
-      else format("{stack_root}/current/atlas-server")
-
-    # Will only exist if this host contains Atlas Server
-    atlas_hook_dir = os.path.join(atlas_home_dir, "hook", hook_name)
-    if os.path.exists(atlas_hook_dir):
-      Logger.info("Atlas Server is present on this host, will symlink jars inside of %s to %s if not already done." %
-                  (jar_source_dir, atlas_hook_dir))
-
-      src_files = os.listdir(atlas_hook_dir)
-      for file_name in src_files:
-        atlas_hook_file_name = os.path.join(atlas_hook_dir, file_name)
-        source_lib_file_name = os.path.join(jar_source_dir, file_name)
-        if os.path.isfile(atlas_hook_file_name):
-          Link(source_lib_file_name, to=atlas_hook_file_name)
+  atlas_home_dir = os.environ['METADATA_HOME_DIR'] if 'METADATA_HOME_DIR' in os.environ \
+    else format("{stack_root}/current/atlas-server")
+
+  # Will only exist if this host contains Atlas Server
+  atlas_hook_dir = os.path.join(atlas_home_dir, "hook", hook_name)
+  if os.path.exists(atlas_hook_dir):
+    Logger.info("Atlas Server is present on this host, will symlink jars inside of %s to %s if not already done." %
+                (jar_source_dir, atlas_hook_dir))
+
+    src_files = os.listdir(atlas_hook_dir)
+    for file_name in src_files:
+      atlas_hook_file_name = os.path.join(atlas_hook_dir, file_name)
+      source_lib_file_name = os.path.join(jar_source_dir, file_name)
+      if os.path.isfile(atlas_hook_file_name):
+        Link(source_lib_file_name, to=atlas_hook_file_name)
+  else:
+    Logger.info("Atlas hook directory path {0} doesn't exist".format(atlas_hook_dir))
 
 def install_atlas_hook_packages(atlas_plugin_package, atlas_ubuntu_plugin_package, host_sys_prepped,
                                 agent_stack_retry_on_unavailability, agent_stack_retry_count):
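
Two behavioral changes stand out in this Python hunk: merged_props now seeds from service_props before the Atlas check, so the hook file is always written (with service values winning over the cluster's Atlas subset), and setup_atlas_jar_symlinks no longer requires has_atlas_in_cluster(). The merge order, reduced to a Java sketch with illustrative names:

    import java.util.HashMap;
    import java.util.Map;

    class AtlasHookPropsSketch {
      static Map<String, String> mergedProps(Map<String, String> serviceProps,
                                             Map<String, String> atlasSubset,
                                             boolean atlasInCluster) {
        Map<String, String> merged = new HashMap<>(serviceProps);
        if (atlasInCluster) {
          merged.putAll(atlasSubset);  // overlay cluster-side Atlas properties
          merged.putAll(serviceProps); // then reapply service overrides, as in the diff
        }
        return merged;
      }
    }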

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index f890326..737e8de 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -2523,9 +2523,10 @@ public class BlueprintConfigurationProcessor {
         }
 
         boolean isAtlasInCluster = topology.getBlueprint().getServices().contains("ATLAS");
+        boolean isAtlasHiveHookEnabled = Boolean.parseBoolean(properties.get("hive-env").get("hive.atlas.hook"));
 
         // Append atlas hook if not already present.
-        if (isAtlasInCluster) {
+        if (isAtlasInCluster || isAtlasHiveHookEnabled) {
           if (!hiveHooksClean.contains(atlasHookClass)) {
             hiveHooksClean.add(atlasHookClass);
           }
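
Boolean.parseBoolean(null) simply returns false, so a missing hive.atlas.hook value reads as disabled; the one remaining assumption in the new line is that the hive-env map itself exists. A null-tolerant variant of the lookup (a sketch, not the committed code):

    import java.util.Map;

    class HookFlagSketch {
      static boolean isHiveHookEnabled(Map<String, Map<String, String>> properties) {
        Map<String, String> hiveEnv = properties.get("hive-env");
        // A missing config type or property is treated as "hook disabled".
        return hiveEnv != null && Boolean.parseBoolean(hiveEnv.get("hive.atlas.hook"));
      }
    }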

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
index 837f340..ff1663e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
@@ -137,6 +137,7 @@ public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
     updateKafkaConfigs();
     updateHiveLlapConfigs();
     updateTablesForZeppelinViewRemoval();
+    updateAtlasConfigs();
   }
 
   protected void updateHostVersionTable() throws SQLException {
@@ -326,5 +327,34 @@ public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
     dbAccessor.addColumn(SERVICE_DESIRED_STATE_TABLE,
             new DBColumnInfo(CREDENTIAL_STORE_ENABLED_COL, Short.class, null, 0, false));
   }
+
+  protected void updateAtlasConfigs() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    Clusters clusters = ambariManagementController.getClusters();
+    if (clusters != null) {
+      Map<String, Cluster> clusterMap = clusters.getClusters();
+      if (clusterMap != null && !clusterMap.isEmpty()) {
+        for (final Cluster cluster : clusterMap.values()) {
+          updateAtlasHookConfig(cluster, "HIVE", "hive-env", "hive.atlas.hook");
+          updateAtlasHookConfig(cluster, "STORM", "storm-env", "storm.atlas.hook");
+          updateAtlasHookConfig(cluster, "FALCON", "falcon-env", "falcon.atlas.hook");
+          updateAtlasHookConfig(cluster, "SQOOP", "sqoop-env", "sqoop.atlas.hook");
+        }
+      }
+    }
+  }
+
+  protected void updateAtlasHookConfig(Cluster cluster, String serviceName, String configType, String propertyName) throws AmbariException {
+    Set<String> installedServices = cluster.getServices().keySet();
+    if (installedServices.contains("ATLAS") && installedServices.contains(serviceName)) {
+      Config configEnv = cluster.getDesiredConfigByType(configType);
+      if (configEnv != null) {
+        Map<String, String> newProperties = new HashMap<>();
+        newProperties.put(propertyName, "true");
+        boolean updateProperty = configEnv.getProperties().containsKey(propertyName);
+        updateConfigurationPropertiesForCluster(cluster, configType, newProperties, updateProperty, true);
+      }
+    }
+  }
 }
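
updateAtlasConfigs applies the same guard per service: only clusters with both ATLAS and the hooked service installed get the corresponding *-env property seeded. The guard-plus-mapping shape, sketched with illustrative names (the println stands in for updateConfigurationPropertiesForCluster):

    import java.util.Map;
    import java.util.Set;

    class AtlasHookUpgradeSketch {
      static final Map<String, String> HOOK_PROPERTY_BY_SERVICE = Map.of(
          "HIVE", "hive.atlas.hook",
          "STORM", "storm.atlas.hook",
          "FALCON", "falcon.atlas.hook",
          "SQOOP", "sqoop.atlas.hook");

      static void seedHookFlags(Set<String> installedServices) {
        HOOK_PROPERTY_BY_SERVICE.forEach((service, property) -> {
          if (installedServices.contains("ATLAS") && installedServices.contains(service)) {
            System.out.println("would set " + property + "=true");
          }
        });
      }
    }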
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-env.xml b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-env.xml
index 09cced6..5663f57 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-env.xml
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/configuration/falcon-env.xml
@@ -21,6 +21,23 @@
 -->
 <configuration supports_adding_forbidden="true">
   <property>
+    <name>falcon.atlas.hook</name>
+    <value>false</value>
+    <display-name>Enable Atlas Hook</display-name>
+    <description>Enable Atlas Hook</description>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.rest.address</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
     <name>falcon_user</name>
     <display-name>Falcon User</display-name>
     <value>falcon</value>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon.py b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon.py
index 3ecb65b..78874ef 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon.py
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon.py
@@ -121,7 +121,7 @@ def falcon(type, action = None, upgrade_type=None):
         cd_access = "a")
 
     # Generate atlas-application.properties.xml file
-    if params.falcon_atlas_support:
+    if params.falcon_atlas_support and params.enable_atlas_hook:
       # If Atlas is added later than Falcon, this package will be absent.
       install_atlas_hook_packages(params.atlas_plugin_package, params.atlas_ubuntu_plugin_package, params.host_sys_prepped,
                                   params.agent_stack_retry_on_unavailability, params.agent_stack_retry_count)

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
index 0fb21d0..6d1ae03 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
@@ -138,14 +138,15 @@ dfs_data_mirroring_dir = "/apps/data-mirroring"
 ########################################################
 #region Atlas Hooks
 falcon_atlas_application_properties = default('/configurations/falcon-atlas-application.properties', {})
+atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
+enable_atlas_hook = default('/configurations/falcon-env/falcon.atlas.hook', False)
 
 # Calculate atlas_hook_cp to add to FALCON_EXTRA_CLASS_PATH
 falcon_atlas_support = False
 
 # Path to add to environment variable
 atlas_hook_cp = ""
-if has_atlas_in_cluster():
-  atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
+if enable_atlas_hook:
 
   # stack_version doesn't contain a minor number of the stack (only first two numbers: 2.3). Get it from current_version_formatted
   falcon_atlas_support = current_version_formatted and check_stack_feature(StackFeature.FALCON_ATLAS_SUPPORT_2_3, current_version_formatted) \

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
index a3ad66a..7a3390e 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
@@ -21,6 +21,23 @@
 -->
 <configuration supports_adding_forbidden="true">
   <property>
+    <name>hive.atlas.hook</name>
+    <value>false</value>
+    <display-name>Enable Atlas Hook</display-name>
+    <description>Enable Atlas Hook</description>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.rest.address</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
     <name>hive.client.heapsize</name>
     <value>512</value>
     <description>Hive Client Java heap size</description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat.py
index e952767..eecbcff 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat.py
@@ -82,6 +82,6 @@ def hcat():
   )
 
   # Generate atlas-application.properties.xml file
-  if has_atlas_in_cluster():
+  if params.enable_atlas_hook:
     atlas_hook_filepath = os.path.join(params.hive_config_dir, params.atlas_hook_filename)
     setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
index f08c458..ffc996c 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
@@ -68,7 +68,7 @@ def hive(name=None):
             mode=0644)
 
   # Generate atlas-application.properties.xml file
-  if has_atlas_in_cluster():
+  if params.enable_atlas_hook:
     atlas_hook_filepath = os.path.join(params.hive_config_dir, params.atlas_hook_filename)
     setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)
   

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index a461bfe..f1a95ed 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -517,9 +517,8 @@ metrics_collection_period = default("/configurations/ams-site/timeline.metrics.s
 ########################################################
 #region Atlas Hooks
 hive_atlas_application_properties = default('/configurations/hive-atlas-application.properties', {})
-
-if has_atlas_in_cluster():
-  atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
+enable_atlas_hook = default('/configurations/hive-env/hive.atlas.hook', False)
+atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
 #endregion
 
 ########################################################

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py
index 00b057c..0af54af 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py
@@ -133,7 +133,7 @@ def webhcat():
     )
 
   # Generate atlas-application.properties.xml file
-  if has_atlas_in_cluster():
+  if params.enable_atlas_hook:
     # WebHCat uses a different config dir than the rest of the daemons in Hive.
     atlas_hook_filepath = os.path.join(params.config_dir, params.atlas_hook_filename)
     setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-env.xml b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-env.xml
index f682e97..508cfab 100644
--- a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-env.xml
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-env.xml
@@ -22,6 +22,23 @@
 <configuration supports_adding_forbidden="true">
   <!-- sqoop-env.sh -->
   <property>
+    <name>sqoop.atlas.hook</name>
+    <value>false</value>
+    <display-name>Enable Atlas Hook</display-name>
+    <description>Enable Atlas Hook</description>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.rest.address</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
     <name>content</name>
     <display-name>sqoop-env template</display-name>
     <description>This is the jinja template for sqoop-env.sh file</description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-site.xml b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-site.xml
index f7823d2..5d87c4d 100644
--- a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-site.xml
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-site.xml
@@ -35,5 +35,11 @@
   <property>
     <name>sqoop.job.data.publish.class</name>
     <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>sqoop-env</type>
+        <name>sqoop.atlas.hook</name>
+      </property>
+    </depends-on>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_linux.py
index 283f54d..c1138b3 100644
--- a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_linux.py
@@ -130,7 +130,6 @@ jdk_location = config['hostLevelParams']['jdk_location']
 ########################################################
 #region Atlas Hooks
 sqoop_atlas_application_properties = default('/configurations/sqoop-atlas-application.properties', {})
-
-if has_atlas_in_cluster():
-  atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
+enable_atlas_hook = default('/configurations/sqoop-env/sqoop.atlas.hook', False)
+atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
 #endregion

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop.py
index 68f06db..d005cbe 100644
--- a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop.py
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop.py
@@ -64,7 +64,7 @@ def sqoop(type=None):
             )
 
   # Generate atlas-application.properties.xml file and symlink the hook jars
-  if has_atlas_in_cluster():
+  if params.enable_atlas_hook:
     atlas_hook_filepath = os.path.join(params.sqoop_conf_dir, params.atlas_hook_filename)
     setup_atlas_hook(SERVICE.SQOOP, params.sqoop_atlas_application_properties, atlas_hook_filepath, params.sqoop_user, params.user_group)
     setup_atlas_jar_symlinks("sqoop", params.sqoop_lib)

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-env.xml b/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-env.xml
index 9547335..3b814a9 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-env.xml
+++ b/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/storm-env.xml
@@ -21,6 +21,23 @@
 -->
 <configuration supports_adding_forbidden="true">
   <property>
+    <name>storm.atlas.hook</name>
+    <value>false</value>
+    <display-name>Enable Atlas Hook</display-name>
+    <description>Enable Atlas Hook</description>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.rest.address</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
     <name>nimbus_seeds_supported</name>
     <value>true</value>
     <description/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
index aca0681..4859534 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
@@ -214,10 +214,10 @@ jar_jvm_opts = ''
 ########################################################
 #region Atlas Hooks
 storm_atlas_application_properties = default('/configurations/storm-atlas-application.properties', {})
+enable_atlas_hook = default('/configurations/storm-env/storm.atlas.hook', False)
+atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
 
-if has_atlas_in_cluster():
-  atlas_hook_filename = default('/configurations/atlas-env/metadata_conf_file', 'atlas-application.properties')
-
+if enable_atlas_hook:
   # Only append /etc/atlas/conf to classpath if on HDP 2.4.*
   if check_stack_feature(StackFeature.ATLAS_CONF_DIR_IN_PATH, stack_version_formatted):
     atlas_conf_dir = os.environ['METADATA_CONF'] if 'METADATA_CONF' in os.environ else '/etc/atlas/conf'

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/storm.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/storm.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/storm.py
index bda4fe2..f02ced4 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/storm.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/storm.py
@@ -99,7 +99,7 @@ def storm(name=None):
   )
 
   # Generate atlas-application.properties.xml file and symlink the hook jars
-  if has_atlas_in_cluster():
+  if params.enable_atlas_hook:
     atlas_hook_filepath = os.path.join(params.conf_dir, params.atlas_hook_filename)
     setup_atlas_hook(SERVICE.STORM, params.storm_atlas_application_properties, atlas_hook_filepath, params.storm_user, params.user_group)
     storm_extlib_dir = os.path.join(params.storm_component_home_dir, "extlib")

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
index ab350dc..287db10 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/hive-site.xml
@@ -232,6 +232,10 @@ limitations under the License.
     <depends-on>
       <property>
         <type>hive-env</type>
+        <name>hive.atlas.hook</name>
+      </property>
+      <property>
+        <type>hive-env</type>
         <name>hive_timeline_logging_enabled</name>
       </property>
       <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/storm-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/storm-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/storm-site.xml
index 2392079..5de6898 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/storm-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/storm-site.xml
@@ -67,6 +67,10 @@
         <type>application-properties</type>
         <name>atlas.authentication.method</name>
       </property>
+       <property>
+        <type>storm-env</type>
+        <name>storm.atlas.hook</name>
+      </property>
     </depends-on>
     <on-ambari-upgrade add="false"/>
     <on-stack-upgrade merge="false"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
index 4e2a994..5d34779 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
@@ -81,7 +81,8 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
       "RANGER": self.recommendRangerConfigurations,
       "RANGER_KMS": self.recommendRangerKMSConfigurations,
       "STORM": self.recommendStormConfigurations,
-      "SQOOP": self.recommendSqoopConfigurations
+      "SQOOP": self.recommendSqoopConfigurations,
+      "FALCON": self.recommendFalconConfigurations
     }
     parentRecommendConfDict.update(childRecommendConfDict)
     return parentRecommendConfDict
@@ -219,6 +220,7 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
     super(HDP23StackAdvisor, self).recommendHIVEConfigurations(configurations, clusterData, services, hosts)
     putHiveSiteProperty = self.putProperty(configurations, "hive-site", services)
     putHiveServerProperty = self.putProperty(configurations, "hiveserver2-site", services)
+    putHiveEnvProperty = self.putProperty(configurations, "hive-env", services)
     putHiveSitePropertyAttribute = self.putPropertyAttribute(configurations, "hive-site")
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
     # hive_security_authorization == 'ranger'
@@ -258,7 +260,19 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
     hive_hooks = [x for x in hive_hooks if x != ""]
     is_atlas_present_in_cluster = "ATLAS" in servicesList
 
+    enable_atlas_hook = False
     if is_atlas_present_in_cluster:
+      putHiveEnvProperty("hive.atlas.hook", "true")
+    else:
+      putHiveEnvProperty("hive.atlas.hook", "false")
+
+    if ('hive-env' in services['configurations']) and ('hive.atlas.hook' in services['configurations']['hive-env']['properties']):
+      if 'hive-env' in configurations and 'hive.atlas.hook' in configurations['hive-env']['properties']:
+        enable_atlas_hook = configurations['hive-env']['properties']['hive.atlas.hook'] == "true"
+      elif 'hive-env' in services['configurations'] and 'hive.atlas.hook' in services['configurations']['hive-env']['properties']:
+        enable_atlas_hook = services['configurations']['hive-env']['properties']['hive.atlas.hook'] == "true"
+
+    if enable_atlas_hook:
       # Append atlas hook if not already present.
       is_atlas_hook_in_config = atlas_hook_class in hive_hooks
       if not is_atlas_hook_in_config:
@@ -791,14 +805,28 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
 
   def recommendSqoopConfigurations(self, configurations, clusterData, services, hosts):
     putSqoopSiteProperty = self.putProperty(configurations, "sqoop-site", services)
+    putSqoopEnvProperty = self.putProperty(configurations, "sqoop-env", services)
 
+    enable_atlas_hook = False
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
     if "ATLAS" in servicesList:
+      putSqoopEnvProperty("sqoop.atlas.hook", "true")
+    else:
+      putSqoopEnvProperty("sqoop.atlas.hook", "false")
+
+    if ('sqoop-env' in services['configurations']) and ('sqoop.atlas.hook' in services['configurations']['sqoop-env']['properties']):
+      if 'sqoop-env' in configurations and 'sqoop.atlas.hook' in configurations['sqoop-env']['properties']:
+        enable_atlas_hook = configurations['sqoop-env']['properties']['sqoop.atlas.hook'] == "true"
+      elif 'sqoop-env' in services['configurations'] and 'sqoop.atlas.hook' in services['configurations']['sqoop-env']['properties']:
+        enable_atlas_hook = services['configurations']['sqoop-env']['properties']['sqoop.atlas.hook'] == "true"
+
+    if enable_atlas_hook:
       putSqoopSiteProperty('sqoop.job.data.publish.class', 'org.apache.atlas.sqoop.hook.SqoopHook')
 
   def recommendStormConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP23StackAdvisor, self).recommendStormConfigurations(configurations, clusterData, services, hosts)
     putStormStartupProperty = self.putProperty(configurations, "storm-site", services)
+    putStormEnvProperty = self.putProperty(configurations, "storm-env", services)
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
 
     if "storm-site" in services["configurations"]:
@@ -814,11 +842,23 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
       atlas_is_present = "ATLAS" in servicesList
       atlas_hook_class = "org.apache.atlas.storm.hook.StormAtlasHook"
       atlas_hook_is_set = atlas_hook_class in notifier_plugin_value
+      enable_atlas_hook = False
+
+      if atlas_is_present:
+        putStormEnvProperty("storm.atlas.hook", "true")
+      else:
+        putStormEnvProperty("storm.atlas.hook", "false")
 
-      if atlas_is_present and not atlas_hook_is_set:
+      if ('storm-env' in services['configurations']) and ('storm.atlas.hook' in services['configurations']['storm-env']['properties']):
+        if 'storm-env' in configurations and 'storm.atlas.hook' in configurations['storm-env']['properties']:
+          enable_atlas_hook = configurations['storm-env']['properties']['storm.atlas.hook'] == "true"
+        elif 'storm-env' in services['configurations'] and 'storm.atlas.hook' in services['configurations']['storm-env']['properties']:
+          enable_atlas_hook = services['configurations']['storm-env']['properties']['storm.atlas.hook'] == "true"
+
+      if enable_atlas_hook and not atlas_hook_is_set:
         notifier_plugin_value = atlas_hook_class if notifier_plugin_value == " " else ",".join([notifier_plugin_value, atlas_hook_class])
 
-      if not atlas_is_present and atlas_hook_is_set:
+      if not enable_atlas_hook and atlas_hook_is_set:
         application_classes = [item for item in notifier_plugin_value.split(",") if item != atlas_hook_class and item != " "]
         notifier_plugin_value = ",".join(application_classes) if application_classes else " "
 
@@ -828,6 +868,17 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
         putStormStartupPropertyAttribute = self.putPropertyAttribute(configurations, "storm-site")
         putStormStartupPropertyAttribute(notifier_plugin_property, 'delete', 'true')
 
+  def recommendFalconConfigurations(self, configurations, clusterData, services, hosts):
+
+    putFalconEnvProperty = self.putProperty(configurations, "falcon-env", services)
+    enable_atlas_hook = False
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+    if "ATLAS" in servicesList:
+      putFalconEnvProperty("falcon.atlas.hook", "true")
+    else:
+      putFalconEnvProperty("falcon.atlas.hook", "false")
+
   def getServiceConfigurationValidators(self):
     parentValidators = super(HDP23StackAdvisor, self).getServiceConfigurationValidators()
     childValidators = {
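
The same read-back pattern now appears three times in this advisor (hive-env, sqoop-env, storm-env): prefer a value recommended earlier in this pass via configurations, else fall back to the cluster's current value in services['configurations']. A minimal sketch of a helper that would capture that precedence (the helper and its name are illustrative only, not part of the patch):

    # Illustrative only: resolves a property the way the hunks above do,
    # checking freshly recommended values first, then the cluster's current ones.
    def get_effective_property(configurations, services, config_type, prop_name):
      for source in (configurations, services.get('configurations', {})):
        if config_type in source and prop_name in source[config_type].get('properties', {}):
          return source[config_type]['properties'][prop_name]
      return None

    # e.g. enable_atlas_hook = get_effective_property(
    #   configurations, services, 'hive-env', 'hive.atlas.hook') == 'true'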

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index 923c1f7..57555ee 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -84,12 +84,6 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     services["forced-configurations"].append({"type" : "oozie-env", "name" : "oozie_admin_users"})
     putOozieEnvProperty("oozie_admin_users", newAdminUsers)
 
-  def recommendFalconConfigurations(self, configurations, clusterData, services, hosts):
-    # Set the classname for the Falcon hook to 2.5 classname = all other 2.3
-    # stack advisor changes are needed.
-    self.atlasFalconHookClassName = "org.apache.atlas.falcon.service.AtlasService"
-    super(HDP25StackAdvisor,self).recommendFalconConfigurations(configurations, clusterData, services, hosts)
-
   def createComponentLayoutRecommendations(self, services, hosts):
     parentComponentLayoutRecommendations = super(HDP25StackAdvisor, self).createComponentLayoutRecommendations(
       services, hosts)

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index 5bedb9d..0ad0351 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -5923,6 +5923,10 @@ public class BlueprintConfigurationProcessorTest {
     hiveProperties.put("hive.exec.post.hooks", "");
     properties.put("hive-site", hiveProperties);
 
+    Map<String, String> hiveEnv = new HashMap<String, String>();
+    hiveEnv.put("hive.atlas.hook", "false");
+    properties.put("hive-env", hiveEnv);
+
 
     Map<String, Map<String, String>> parentProperties = new HashMap<String, Map<String, String>>();
     Configuration parentClusterConfig = new Configuration(parentProperties,
@@ -6000,6 +6004,9 @@ public class BlueprintConfigurationProcessorTest {
     hiveProperties.put("hive.exec.post.hooks", "");
     properties.put("hive-site", hiveProperties);
 
+    Map<String, String> hiveEnv = new HashMap<String, String>();
+    properties.put("hive-env", hiveEnv);
+
     return properties;
   }
 
@@ -6051,6 +6058,10 @@ public class BlueprintConfigurationProcessorTest {
     hiveProperties.put("hive.exec.post.hooks", "foo");
     properties.put("hive-site", hiveProperties);
 
+    Map<String, String> hiveEnv = new HashMap<String, String>();
+    hiveEnv.put("hive.atlas.hook", "false");
+    properties.put("hive-env", hiveEnv);
+
 
     Map<String, Map<String, String>> parentProperties = new HashMap<String, Map<String, String>>();
     Configuration parentClusterConfig = new Configuration(parentProperties,

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
index 0b2b32d..16da70c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
@@ -53,6 +53,7 @@ import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.stack.OsFamily;
 import org.easymock.Capture;
 import org.easymock.EasyMock;
@@ -214,6 +215,7 @@ public class UpgradeCatalog250Test {
     Method updateHiveLlapConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateHiveLlapConfigs");
     Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
     Method updateTablesForZeppelinViewRemoval = UpgradeCatalog250.class.getDeclaredMethod("updateTablesForZeppelinViewRemoval");
+    Method updateAtlasConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateAtlasConfigs");
 
     UpgradeCatalog250 upgradeCatalog250 = createMockBuilder(UpgradeCatalog250.class)
       .addMockedMethod(updateAmsConfigs)
@@ -221,6 +223,7 @@ public class UpgradeCatalog250Test {
       .addMockedMethod(updateHiveLlapConfigs)
       .addMockedMethod(addNewConfigurationsFromXml)
       .addMockedMethod(updateTablesForZeppelinViewRemoval)
+      .addMockedMethod(updateAtlasConfigs)
       .createMock();
 
     upgradeCatalog250.updateAMSConfigs();
@@ -238,6 +241,9 @@ public class UpgradeCatalog250Test {
     upgradeCatalog250.updateTablesForZeppelinViewRemoval();
     expectLastCall().once();
 
+    upgradeCatalog250.updateAtlasConfigs();
+    expectLastCall().once();
+
     replay(upgradeCatalog250);
 
     upgradeCatalog250.executeDMLUpdates();
@@ -372,4 +378,91 @@ public class UpgradeCatalog250Test {
     Map<String, String> updatedProperties = propertiesCapture.getValue();
     assertTrue(Maps.difference(newProperties, updatedProperties).areEqual());
   }
+
+  @Test
+  public void testUpdateAtlasConfigs() throws Exception {
+
+    Map<String, String> oldHiveProperties = new HashMap<String, String>();
+    Map<String, String> newHiveProperties = new HashMap<String, String>();
+
+    oldHiveProperties.put("hive.atlas.hook", "false");
+    newHiveProperties.put("hive.atlas.hook", "true");
+    testUpdateAtlasHookConfig(oldHiveProperties, newHiveProperties, "hive-env");
+
+    Map<String, String> oldStormProperties = new HashMap<String, String>();
+    Map<String, String> newStormProperties = new HashMap<String, String>();
+    oldStormProperties.put("storm.atlas.hook", "false");
+    newStormProperties.put("storm.atlas.hook", "true");
+    testUpdateAtlasHookConfig(oldStormProperties, newStormProperties, "storm-env");
+
+    Map<String, String> oldFalconProperties = new HashMap<String, String>();
+    Map<String, String> newFalconProperties = new HashMap<String, String>();
+    oldFalconProperties.put("falcon.atlas.hook", "false");
+    newFalconProperties.put("falcon.atlas.hook", "true");
+    testUpdateAtlasHookConfig(oldFalconProperties, newFalconProperties, "falcon-env");
+
+    Map<String, String> oldSqoopProperties = new HashMap<String, String>();
+    Map<String, String> newSqoopProperties = new HashMap<String, String>();
+    oldSqoopProperties.put("sqoop.atlas.hook", "false");
+    newSqoopProperties.put("sqoop.atlas.hook", "true");
+    testUpdateAtlasHookConfig(oldSqoopProperties, newSqoopProperties, "sqoop-env");
+  }
+
+  public void testUpdateAtlasHookConfig(Map<String, String> oldProperties, Map<String, String> newProperties, String configType) throws Exception {
+
+    Map<String, Service> installedServices = new HashMap<String, Service>() {
+      {
+        put("ATLAS", null);
+        put("HIVE", null);
+        put("STORM", null);
+        put("FALCON", null);
+        put("SQOOP", null);
+      }
+    };
+
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+
+    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
+    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
+    final Service service = createStrictMock(Service.class);
+
+    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", cluster);
+    }}).once();
+    expect(cluster.getClusterName()).andReturn("cl1").once();
+    expect(cluster.getServices()).andReturn(installedServices).atLeastOnce();
+
+    Config mockAtlasConfig = easyMockSupport.createNiceMock(Config.class);
+    expect(cluster.getDesiredConfigByType(configType)).andReturn(mockAtlasConfig).atLeastOnce();
+    expect(mockAtlasConfig.getProperties()).andReturn(oldProperties).anyTimes();
+
+    Injector injector = easyMockSupport.createNiceMock(Injector.class);
+    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
+    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
+    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
+
+    replay(injector, clusters, mockAtlasConfig, cluster);
+
+    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
+      .addMockedMethod("createConfiguration")
+      .addMockedMethod("getClusters", new Class[] { })
+      .addMockedMethod("createConfig")
+      .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
+      .createNiceMock();
+
+    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
+    Capture<Map> propertiesCapture = EasyMock.newCapture();
+
+    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
+    expect(controller.getClusters()).andReturn(clusters).anyTimes();
+    expect(controller.createConfig(anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+      anyObject(Map.class))).andReturn(createNiceMock(Config.class)).once();
+
+    replay(controller, injector2);
+    new UpgradeCatalog250(injector2).updateAtlasConfigs();
+    easyMockSupport.verifyAll();
+
+    Map<String, String> updatedProperties = propertiesCapture.getValue();
+    assertTrue(Maps.difference(newProperties, updatedProperties).areEqual());
+  }
 }
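
For readers skimming the test: the UpgradeCatalog250 implementation itself is not shown in this digest, but the assertions above encode a simple contract: for each <service>-env config type, when ATLAS is among the installed services, the *.atlas.hook flag in the captured properties is flipped to "true". A hedged sketch of that contract (Python, to match the rest of this digest's scripts; all names are illustrative, this is not the Java code):

    # Illustrative sketch of the behavior the test asserts, not the real impl.
    HOOK_FLAGS = {
      'hive-env': 'hive.atlas.hook',
      'storm-env': 'storm.atlas.hook',
      'falcon-env': 'falcon.atlas.hook',
      'sqoop-env': 'sqoop.atlas.hook',
    }

    def update_atlas_configs(installed_services, get_props, set_props):
      atlas_present = 'ATLAS' in installed_services
      for config_type, flag in HOOK_FLAGS.items():
        props = dict(get_props(config_type) or {})
        props[flag] = 'true' if atlas_present else 'false'
        set_props(config_type, props)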

http://git-wip-us.apache.org/repos/asf/ambari/blob/8ad494e9/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
index 39cbbce..b5ff24c 100644
--- a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
@@ -587,7 +587,8 @@ class TestHDP23StackAdvisor(TestCase):
           'hive_exec_orc_storage_strategy': 'SPEED',
           'hive_security_authorization': 'None',
           'hive_timeline_logging_enabled': 'true',
-          'hive_txn_acid': 'off'
+          'hive_txn_acid': 'off',
+          'hive.atlas.hook': 'false'
         }
       },
       'hive-site': {
@@ -745,6 +746,7 @@ class TestHDP23StackAdvisor(TestCase):
         },
         "hive-env": {
           "properties": {
+            "hive.atlas.hook": "false"
           }
         },
         "hive-site": {
@@ -863,7 +865,8 @@ class TestHDP23StackAdvisor(TestCase):
           'hive_exec_orc_storage_strategy': 'SPEED',
           'hive_security_authorization': 'None',
           'hive_timeline_logging_enabled': 'true',
-          'hive_txn_acid': 'off'
+          'hive_txn_acid': 'off',
+          'hive.atlas.hook': 'true'
         }
       },
       'hive-site': {
@@ -1045,6 +1048,7 @@ class TestHDP23StackAdvisor(TestCase):
         },
         "hive-env": {
           "properties": {
+            "hive.atlas.hook": "false"
           }
         },
         "hive-site": {
@@ -1733,6 +1737,11 @@ class TestHDP23StackAdvisor(TestCase):
         "properties": {
           "ranger-storm-plugin-enabled": "No"
         }
+      },
+      "storm-env": {
+        "properties": {
+          "storm.atlas.hook": "true"
+        }
       }
     }
     services = {
@@ -1773,6 +1782,11 @@ class TestHDP23StackAdvisor(TestCase):
           "properties": {
             "ranger-storm-plugin-enabled": "No"
           }
+        },
+        "storm-env": {
+          "properties": {
+          "storm.atlas.hook": "false"
+          }
         }
       },
       "changed-configurations": [ ]
@@ -1830,6 +1844,11 @@ class TestHDP23StackAdvisor(TestCase):
         'properties': {
           'sqoop.job.data.publish.class': 'org.apache.atlas.sqoop.hook.SqoopHook',
         }
+      },
+      'sqoop-env': {
+        'properties': {
+          'sqoop.atlas.hook': 'true'
+        }
       }
     }
     services = {
@@ -1864,6 +1883,11 @@ class TestHDP23StackAdvisor(TestCase):
           "properties": {
             "sqoop.job.data.publish.class": "foo"
           }
+        },
+        "sqoop-env": {
+          "properties": {
+            "sqoop.atlas.hook": "false"
+          }
         }
       },
       "changed-configurations": [ ]


[04/14] ambari git commit: AMBARI-19081. JS error on "Use RedHat Satellite" click on step1 installer (onechiporenko)

Posted by jo...@apache.org.
AMBARI-19081. JS error on "User RedHat Satellite" click on step1 installer (onechiporenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7f6ca9d0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7f6ca9d0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7f6ca9d0

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 7f6ca9d07b2f2e389d9e887853238ca3969d99ce
Parents: 8ad494e
Author: Oleg Nechiporenko <on...@apache.org>
Authored: Mon Dec 5 13:29:05 2016 +0200
Committer: Oleg Nechiporenko <on...@apache.org>
Committed: Mon Dec 5 13:29:05 2016 +0200

----------------------------------------------------------------------
 ambari-web/app/styles/wizard.less               |  6 ++++-
 ambari-web/app/templates/wizard/step1.hbs       |  2 +-
 ambari-web/app/views/wizard/step1_view.js       | 21 +++++++++---------
 .../widgets/slider_config_widget_view_test.js   | 23 --------------------
 .../test/views/main/dashboard/widgets_test.js   |  2 +-
 5 files changed, 18 insertions(+), 36 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7f6ca9d0/ambari-web/app/styles/wizard.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/wizard.less b/ambari-web/app/styles/wizard.less
index a4dac8c..57dae45 100644
--- a/ambari-web/app/styles/wizard.less
+++ b/ambari-web/app/styles/wizard.less
@@ -554,6 +554,11 @@
       }
     }
   }
+  .inner-table {
+    td {
+      vertical-align: middle;
+    }
+  }
   .repos-panel {
     .remove-icon {
       color: red;
@@ -567,7 +572,6 @@
     }
     .repo-url input {
       width: 90%;
-      height: 24px;
     }
     #skip-validation, #use-redhat {
       span.disabled {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7f6ca9d0/ambari-web/app/templates/wizard/step1.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/wizard/step1.hbs b/ambari-web/app/templates/wizard/step1.hbs
index 5297d12..8179b41 100644
--- a/ambari-web/app/templates/wizard/step1.hbs
+++ b/ambari-web/app/templates/wizard/step1.hbs
@@ -144,7 +144,7 @@
                                     {{view view.popoverView repositoryBinding="repository"}}
                                   </td>
                                   <td class="col-sm-8">
-                                    <div {{bindAttr class=":repo-url repository.invalidFormatError:textfield-error repository.invalidError:textfield-error"}}>
+                                    <div {{bindAttr class=":repo-url repository.invalidFormatError:has-error repository.invalidError:has-error"}}>
                                       {{view Ember.TextField placeholderBinding="repository.placeholder" valueBinding="repository.baseUrl" disabledBinding="controller.selectedStack.useRedhatSatellite" classNames="form-control"}}
                                       {{#if controller.selectedStack.usePublicRepo}}
                                         <i {{bindAttr class="repository.undo::invisible :icon-undo"}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/7f6ca9d0/ambari-web/app/views/wizard/step1_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/wizard/step1_view.js b/ambari-web/app/views/wizard/step1_view.js
index d5cbbb5..161d794 100644
--- a/ambari-web/app/views/wizard/step1_view.js
+++ b/ambari-web/app/views/wizard/step1_view.js
@@ -226,17 +226,18 @@ App.WizardStep1View = Em.View.extend({
     checkedBinding: 'controller.selectedStack.useRedhatSatellite',
     disabledBinding: 'controller.selectedStack.usePublicRepo',
     click: function () {
-      // click triggered before value is toggled, so if-statement is inverted
-      if (this.get('disabled')) return;
-      if (!this.get('controller.selectedStack.useRedhatSatellite')) {
-        App.ModalPopup.show({
-          header: Em.I18n.t('common.important'),
-          secondary: false,
-          bodyClass: Ember.View.extend({
-            template: Ember.Handlebars.compile(Em.I18n.t('installer.step1.advancedRepo.useRedhatSatellite.warning'))
-          })
-        });
+      if (!this.get('disabled')) {
+        this.toggleProperty('controller.selectedStack.useRedhatSatellite');
+        if (this.get('controller.selectedStack.useRedhatSatellite')) {
+          App.ModalPopup.show({
+            header: Em.I18n.t('common.important'),
+            encodeBody: false,
+            secondary: false,
+            body: Em.I18n.t('installer.step1.advancedRepo.useRedhatSatellite.warning')
+          });
+        }
       }
+      return false;
     }
   }),
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7f6ca9d0/ambari-web/test/views/common/configs/widgets/slider_config_widget_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/common/configs/widgets/slider_config_widget_view_test.js b/ambari-web/test/views/common/configs/widgets/slider_config_widget_view_test.js
index a6b25ce..5cc055c 100644
--- a/ambari-web/test/views/common/configs/widgets/slider_config_widget_view_test.js
+++ b/ambari-web/test/views/common/configs/widgets/slider_config_widget_view_test.js
@@ -636,29 +636,6 @@ describe('App.SliderConfigWidgetView', function () {
       expect(viewInt.get('issueMessage')).to.equal('');
     });
 
-    describe('llap_queue_capacity property', function() {
-      beforeEach(function() {
-        viewInt.set('config.name', 'llap_queue_capacity');
-      });
-      it('should validate and warn about llap issue when value is 100%', function() {
-        viewInt.set('config.stackConfigProperty.valueAttributes.maximum', 100);
-        viewInt.set('config.value', '100');
-        viewInt.set('config.errorMessage', '');
-        viewInt.set('config.warnMessage', '');
-        viewInt.set('config.widgetType', 'slider');
-        assert.isTrue(viewInt.isValueCompatibleWithWidget(), 'value should be compatible with widget');
-        assert.equal(viewInt.get('config.warnMessage'), Em.I18n.t('config.warnMessage.llap_queue_capacity.max'), 'warn message validation');
-      });
-
-      it('should pass validation because llap < 100', function() {
-        viewInt.set('config.stackConfigProperty.valueAttributes.maximum', 100);
-        viewInt.set('config.value', '99');
-        viewInt.set('config.errorMessage', '');
-        viewInt.set('config.warnMessage', '');
-        assert.isTrue(viewInt.isValueCompatibleWithWidget(), 'value should be compatible with widget');
-        assert.equal(viewInt.get('config.warnMessage'), '', 'warn message validation');
-      });
-    });
   });
 
   describe('#formatTickLabel', function () {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7f6ca9d0/ambari-web/test/views/main/dashboard/widgets_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/dashboard/widgets_test.js b/ambari-web/test/views/main/dashboard/widgets_test.js
index 707087a..b2d154b 100644
--- a/ambari-web/test/views/main/dashboard/widgets_test.js
+++ b/ambari-web/test/views/main/dashboard/widgets_test.js
@@ -223,7 +223,7 @@ describe('App.MainDashboardWidgetsView', function () {
       expect(view.get('visibleWidgets')).to.be.eql([Em.Object.create({
         id: 1,
         threshold: [],
-        viewClass: App['NameNodeHeapPieChartView'],
+        viewClass: App.NameNodeHeapPieChartView,
         sourceName: 'HDFS',
         title: Em.I18n.t('dashboard.widgets.NameNodeHeap')
       })]);


[06/14] ambari git commit: AMBARI-19058. Perf: Deploy 3000 Agent cluster and find perf bugs. Part 1. (vbrodetskyi)

Posted by jo...@apache.org.
AMBARI-19058. Perf: Deploy 3000 Agent cluster and find perf bugs. Part 1. (vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/08342b3c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/08342b3c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/08342b3c

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 08342b3cb268481e5033c99cbf418f123e509327
Parents: cac20e0
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Mon Dec 5 15:12:32 2016 +0200
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Mon Dec 5 15:12:32 2016 +0200

----------------------------------------------------------------------
 contrib/utils/perf/deploy-gce-perf-cluster.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/08342b3c/contrib/utils/perf/deploy-gce-perf-cluster.py
----------------------------------------------------------------------
diff --git a/contrib/utils/perf/deploy-gce-perf-cluster.py b/contrib/utils/perf/deploy-gce-perf-cluster.py
index 4737c6f..6de3938 100644
--- a/contrib/utils/perf/deploy-gce-perf-cluster.py
+++ b/contrib/utils/perf/deploy-gce-perf-cluster.py
@@ -28,7 +28,7 @@ import re
 import socket
 
 cluster_prefix = "perf"
-ambari_repo_file_url = "http://s3.amazonaws.com/dev.hortonworks.com/ambari/centos6/2.x/latest/trunk/ambaribn.repo"
+ambari_repo_file_url = "http://s3.amazonaws.com/dev.hortonworks.com/ambari/centos6/2.x/updates/2.5.0.0/ambaribn.repo"
 
 public_hostname_script = "foo"
 hostname_script = "foo"
@@ -285,8 +285,8 @@ def create_vms(args, number_of_nodes):
   time.sleep(10)
 
   # trying to create cluster with needed params
-  print "Creating agent VMs {0}-agent-{1} with {2} large nodes on centos6...".format(cluster_prefix, args.cluster_suffix, str(number_of_nodes))
-  execute_command(args, args.controller, "/usr/sbin/gce up {0}-agent-{1} {2} --centos6 --large".format(cluster_prefix, args.cluster_suffix, str(number_of_nodes)),
+  print "Creating agent VMs {0}-agent-{1} with {2} xlarge nodes on centos6...".format(cluster_prefix, args.cluster_suffix, str(number_of_nodes))
+  execute_command(args, args.controller, "/usr/sbin/gce up {0}-agent-{1} {2} --centos6 --xlarge".format(cluster_prefix, args.cluster_suffix, str(number_of_nodes)),
                   "Failed to create cluster VMs, probably not enough resources!", "-tt")
 
   # VMs are not accessible immediately
@@ -331,7 +331,7 @@ def create_server_script(server_host_name):
   contents = "#!/bin/bash\n" + \
   "wget -O /etc/yum.repos.d/ambari.repo {0}\n".format(ambari_repo_file_url) + \
   "yum clean all; yum install git ambari-server -y\n" + \
-  "mkdir /home ; cd /home ; git clone https://github.com/apache/ambari.git\n" + \
+  "mkdir /home ; cd /home ; git clone https://github.com/apache/ambari.git ; cd ambari ; git checkout branch-2.5\n" + \
   "cp -r /home/ambari/ambari-server/src/main/resources/stacks/PERF /var/lib/ambari-server/resources/stacks/PERF\n" + \
   "cp -r /home/ambari/ambari-server/src/main/resources/stacks/PERF /var/lib/ambari-agent/cache/stacks/PERF\n" + \
   "\n" + \
@@ -384,7 +384,7 @@ def create_agent_script(server_host_name):
   contents = "#!/bin/bash\n" + \
   "wget -O /etc/yum.repos.d/ambari.repo {0}\n".format(ambari_repo_file_url) + \
   "yum clean all; yum install git ambari-agent -y\n" + \
-  "mkdir /home ; cd /home; git clone https://github.com/apache/ambari.git\n" + \
+  "mkdir /home ; cd /home; git clone https://github.com/apache/ambari.git ; cd ambari ; git checkout branch-2.5\n" + \
   "cp -r /home/ambari/ambari-server/src/main/resources/stacks/PERF /var/lib/ambari-agent/cache/stacks/PERF\n" + \
   "sed -i -e 's/hostname=localhost/hostname={0}/g' /etc/ambari-agent/conf/ambari-agent.ini\n".format(server_host_name) + \
   "sed -i -e 's/agent]/agent]\\nhostname_script={0}\\npublic_hostname_script={1}\\n/1' /etc/ambari-agent/conf/ambari-agent.ini\n".format(hostname_script, public_hostname_script) + \


[09/14] ambari git commit: AMBARI-18065. Change Ranger Admin start script to set up db for ranger (magyari_sandor)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f5051dcb/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
new file mode 100644
index 0000000..030b717
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-secured.json
@@ -0,0 +1,773 @@
+{
+    "localComponents": [
+        "SECONDARY_NAMENODE", 
+        "HDFS_CLIENT", 
+        "DATANODE", 
+        "NAMENODE", 
+        "RANGER_ADMIN", 
+        "RANGER_TAGSYNC", 
+        "RANGER_USERSYNC", 
+        "ZOOKEEPER_SERVER", 
+        "ZOOKEEPER_CLIENT", 
+        "KERBEROS_CLIENT",
+        "LOGSEARCH_SOLR",
+        "LOGSEARCH_SOLR_CLIENT"
+    ], 
+    "configuration_attributes": {
+        "ranger-hdfs-audit": {}, 
+        "ssl-client": {}, 
+        "ranger-admin-site": {}, 
+        "ranger-hdfs-policymgr-ssl": {}, 
+        "tagsync-application-properties": {}, 
+        "ranger-env": {}, 
+        "usersync-log4j": {}, 
+        "ranger-hdfs-plugin-properties": {}, 
+        "kerberos-env": {}, 
+        "admin-properties": {}, 
+        "ranger-ugsync-site": {}, 
+        "hdfs-site": {
+            "final": {
+                "dfs.datanode.data.dir": "true", 
+                "dfs.namenode.http-address": "true", 
+                "dfs.datanode.failed.volumes.tolerated": "true", 
+                "dfs.support.append": "true", 
+                "dfs.namenode.name.dir": "true", 
+                "dfs.webhdfs.enabled": "true"
+            }
+        }, 
+        "ranger-tagsync-site": {}, 
+        "zoo.cfg": {}, 
+        "hadoop-policy": {}, 
+        "hdfs-log4j": {}, 
+        "krb5-conf": {}, 
+        "core-site": {
+            "final": {
+                "fs.defaultFS": "true"
+            }
+        }, 
+        "hadoop-env": {}, 
+        "zookeeper-log4j": {}, 
+        "ssl-server": {}, 
+        "ranger-site": {}, 
+        "admin-log4j": {}, 
+        "tagsync-log4j": {}, 
+        "ranger-hdfs-security": {}, 
+        "usersync-properties": {}, 
+        "zookeeper-env": {},
+        "infra-solr-env": {},
+        "infra-solr-client-log4j": {},
+        "cluster-env": {}
+    }, 
+    "public_hostname": "c6401.ambari.apache.org", 
+    "commandId": "41-2", 
+    "hostname": "c6401.ambari.apache.org", 
+    "kerberosCommandParams": [], 
+    "serviceName": "RANGER", 
+    "role": "RANGER_ADMIN", 
+    "forceRefreshConfigTagsBeforeExecution": [], 
+    "requestId": 41, 
+    "agentConfigParams": {
+        "agent": {
+            "parallel_execution": 0
+        }
+    }, 
+    "clusterName": "test_Cluster01", 
+    "commandType": "EXECUTION_COMMAND", 
+    "taskId": 186, 
+    "roleParams": {}, 
+    "configurationTags": {
+        "ranger-hdfs-audit": {
+            "tag": "version1466705299922"
+        }, 
+        "ssl-client": {
+            "tag": "version1"
+        }, 
+        "ranger-admin-site": {
+            "tag": "version1467016680635"
+        }, 
+        "ranger-hdfs-policymgr-ssl": {
+            "tag": "version1466705299922"
+        }, 
+        "tagsync-application-properties": {
+            "tag": "version1467016680511"
+        }, 
+        "ranger-env": {
+            "tag": "version1466705299949"
+        }, 
+        "ranger-ugsync-site": {
+            "tag": "version1467016680537"
+        }, 
+        "ranger-hdfs-plugin-properties": {
+            "tag": "version1466705299922"
+        }, 
+        "kerberos-env": {
+            "tag": "version1467016537243"
+        }, 
+        "admin-properties": {
+            "tag": "version1466705299949"
+        }, 
+        "hdfs-site": {
+            "tag": "version1467016680401"
+        }, 
+        "ranger-tagsync-site": {
+            "tag": "version1467016680586"
+        }, 
+        "zoo.cfg": {
+            "tag": "version1"
+        }, 
+        "hadoop-policy": {
+            "tag": "version1"
+        }, 
+        "hdfs-log4j": {
+            "tag": "version1"
+        }, 
+        "usersync-log4j": {
+            "tag": "version1466705299949"
+        }, 
+        "krb5-conf": {
+            "tag": "version1467016537243"
+        }, 
+        "core-site": {
+            "tag": "version1467016680612"
+        }, 
+        "hadoop-env": {
+            "tag": "version1467016680446"
+        }, 
+        "zookeeper-log4j": {
+            "tag": "version1"
+        }, 
+        "ssl-server": {
+            "tag": "version1"
+        }, 
+        "ranger-site": {
+            "tag": "version1466705299949"
+        }, 
+        "admin-log4j": {
+            "tag": "version1466705299949"
+        }, 
+        "tagsync-log4j": {
+            "tag": "version1466705299949"
+        }, 
+        "ranger-hdfs-security": {
+            "tag": "version1466705299922"
+        }, 
+        "usersync-properties": {
+            "tag": "version1466705299949"
+        }, 
+        "zookeeper-env": {
+            "tag": "version1467016680492"
+        },
+        "infra-solr-env": {
+            "tag": "version1467098537360"
+        },
+        "infra-solr-client-log4j": {
+            "tag": "version1467096917836"
+        },
+        "cluster-env": {
+            "tag": "version1467016680567"
+        }
+    }, 
+    "roleCommand": "START", 
+    "hostLevelParams": {
+        "agent_stack_retry_on_unavailability": "false", 
+        "stack_name": "HDP", 
+        "package_version": "2_5_0_0_*", 
+        "custom_mysql_jdbc_name": "mysql-connector-java.jar",
+        "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
+        "host_sys_prepped": "false", 
+        "ambari_db_rca_username": "mapred", 
+        "current_version": "2.6.0.0-801",
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar", 
+        "agent_stack_retry_count": "5", 
+        "stack_version": "2.6",
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
+        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "java_home": "/usr/jdk64/jdk1.7.0_45",
+        "repository_version_id": "1", 
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
+        "java_version": "8", 
+        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]", 
+        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]", 
+        "db_name": "ambari", 
+        "group_list": "[\"ranger\",\"hadoop\",\"users\"]", 
+        "agentCacheDir": "/var/lib/ambari-agent/cache", 
+        "ambari_db_rca_password": "mapred", 
+        "jce_name": "jce_policy-8.zip", 
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
+        "db_driver_filename": "mysql-connector-java.jar", 
+        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "clientsToUpdateConfigs": "[\"*\"]"
+    }, 
+    "commandParams": {
+        "service_package_folder": "common-services/RANGER/0.4.0/package", 
+        "script": "scripts/ranger_admin.py", 
+        "hooks_folder": "HDP/2.0.6/hooks", 
+        "version": "2.6.0.0-801",
+        "max_duration_for_retries": "0", 
+        "command_retry_enabled": "false", 
+        "command_timeout": "600", 
+        "script_type": "PYTHON"
+    }, 
+    "forceRefreshConfigTags": [], 
+    "stageId": 2, 
+    "clusterHostInfo": {
+        "snamenode_host": [
+            "c6401.ambari.apache.org"
+        ], 
+        "ambari_server_use_ssl": [
+            "false"
+        ], 
+        "all_ping_ports": [
+            "8670"
+        ], 
+        "ranger_tagsync_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "ranger_usersync_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "all_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "slave_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "namenode_host": [
+            "c6401.ambari.apache.org"
+        ], 
+        "ambari_server_port": [
+            "8080"
+        ], 
+        "ranger_admin_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "all_racks": [
+            "/default-rack"
+        ], 
+        "all_ipv4_ips": [
+            "172.22.83.73"
+        ], 
+        "ambari_server_host": [
+            "c6401.ambari.apache.org"
+        ], 
+        "zookeeper_hosts": [
+            "c6401.ambari.apache.org"
+        ],
+        "infra_solr_hosts": [
+            "c6401.ambari.apache.org"
+        ]
+    }, 
+    "configurations": {
+        "ranger-hdfs-audit": {
+            "xasecure.audit.destination.solr.zookeepers": "NONE", 
+            "xasecure.audit.destination.solr.urls": "", 
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
+            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool",
+            "xasecure.audit.destination.hdfs": "true", 
+            "xasecure.audit.destination.solr": "false",
+            "xasecure.audit.provider.summary.enabled": "false", 
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "xasecure.audit.is.enabled": "true"
+        }, 
+        "ssl-client": {
+            "ssl.client.truststore.reload.interval": "10000", 
+            "ssl.client.keystore.password": "bigdata", 
+            "ssl.client.truststore.type": "jks", 
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
+            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.keystore.type": "jks"
+        }, 
+        "ranger-admin-site": {
+            "ranger.is.solr.kerberised": "true",
+            "ranger.admin.kerberos.cookie.domain": "{{ranger_host}}", 
+            "ranger.kms.service.user.hdfs": "hdfs", 
+            "ranger.spnego.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
+            "ranger.plugins.hive.serviceuser": "hive", 
+            "ranger.lookup.kerberos.keytab": "/etc/security/keytabs/rangerlookup.service.keytab", 
+            "ranger.plugins.kms.serviceuser": "kms", 
+            "ranger.service.https.attrib.ssl.enabled": "false", 
+            "ranger.sso.browser.useragent": "Mozilla,chrome", 
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
+            "ranger.plugins.hbase.serviceuser": "hbase", 
+            "ranger.plugins.hdfs.serviceuser": "hdfs", 
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
+            "ranger.plugins.knox.serviceuser": "knox", 
+            "ranger.ldap.base.dn": "dc=example,dc=com", 
+            "ranger.sso.publicKey": "", 
+            "ranger.admin.kerberos.cookie.path": "/", 
+            "ranger.service.https.attrib.clientAuth": "want", 
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
+            "ranger.ldap.group.roleattribute": "cn", 
+            "ranger.plugins.kafka.serviceuser": "kafka", 
+            "ranger.admin.kerberos.principal": "rangeradmin/_HOST@EXAMPLE.COM", 
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
+            "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks",
+            "ranger.ldap.referral": "ignore", 
+            "ranger.service.http.port": "6080", 
+            "ranger.ldap.user.searchfilter": "(uid={0})", 
+            "ranger.plugins.atlas.serviceuser": "atlas", 
+            "ranger.truststore.password": "changeit", 
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
+            "ranger.audit.solr.password": "NONE", 
+            "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ambari-solr", 
+            "ranger.lookup.kerberos.principal": "rangerlookup/_HOST@EXAMPLE.COM",
+            "ranger.service.https.port": "6182", 
+            "ranger.plugins.storm.serviceuser": "storm", 
+            "ranger.externalurl": "{{ranger_external_url}}", 
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
+            "ranger.kms.service.user.hive": "", 
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
+            "ranger.service.host": "{{ranger_host}}", 
+            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin", 
+            "ranger.service.https.attrib.keystore.pass": "xasecure", 
+            "ranger.unixauth.remote.login.enabled": "true", 
+            "ranger.jpa.jdbc.credential.alias": "rangeradmin",
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
+            "ranger.audit.solr.username": "ranger_solr", 
+            "ranger.sso.enabled": "false", 
+            "ranger.audit.solr.urls": "", 
+            "ranger.ldap.ad.domain": "", 
+            "ranger.plugins.yarn.serviceuser": "yarn", 
+            "ranger.audit.source.type": "solr", 
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
+            "ranger.authentication.method": "UNIX", 
+            "ranger.service.http.enabled": "true", 
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
+            "ranger.ldap.ad.referral": "ignore", 
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
+            "ranger.jpa.jdbc.password": "_", 
+            "ranger.spnego.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "ranger.sso.providerurl": "", 
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
+            "ranger.admin.kerberos.keytab": "/etc/security/keytabs/rangeradmin.service.keytab", 
+            "ranger.admin.kerberos.token.valid.seconds": "30", 
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.unixauth.service.port": "5151"
+        }, 
+        "ranger-hdfs-policymgr-ssl": {
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
+            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
+        }, 
+        "tagsync-application-properties": {
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
+            "atlas.kafka.security.protocol": "SASL_PLAINTEXT", 
+            "atlas.jaas.KafkaClient.option.principal": "{{tagsync_jaas_principal}}", 
+            "atlas.jaas.KafkaClient.option.keyTab": "{{tagsync_keytab_path}}", 
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
+            "atlas.jaas.KafkaClient.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule", 
+            "atlas.jaas.KafkaClient.option.serviceName": "kafka", 
+            "atlas.kafka.bootstrap.servers": "localhost:6667", 
+            "atlas.jaas.KafkaClient.option.useKeyTab": "true", 
+            "atlas.jaas.KafkaClient.option.storeKey": "true", 
+            "atlas.jaas.KafkaClient.loginModuleControlFlag": "required", 
+            "atlas.kafka.sasl.kerberos.service.name": "kafka"
+        }, 
+        "ranger-env": {
+            "ranger_solr_shards": "1", 
+            "ranger_solr_config_set": "ranger_audits", 
+            "ranger_user": "ranger", 
+            "ranger_solr_replication_factor": "1",
+            "xml_configurations_supported": "true", 
+            "ranger-atlas-plugin-enabled": "No", 
+            "ranger-hbase-plugin-enabled": "No", 
+            "ranger-yarn-plugin-enabled": "No", 
+            "bind_anonymous": "false", 
+            "ranger_admin_username": "amb_ranger_admin", 
+            "admin_password": "admin", 
+            "is_solrCloud_enabled": "true", 
+            "ranger-storm-plugin-enabled": "No", 
+            "ranger-hdfs-plugin-enabled": "No", 
+            "ranger_group": "ranger", 
+            "ranger-knox-plugin-enabled": "No", 
+            "ranger_admin_log_dir": "/var/log/ranger/admin", 
+            "ranger-kafka-plugin-enabled": "No", 
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
+            "ranger-hive-plugin-enabled": "No", 
+            "xasecure.audit.destination.solr": "true", 
+            "ranger_pid_dir": "/var/run/ranger", 
+            "xasecure.audit.destination.hdfs": "true", 
+            "admin_username": "admin", 
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "create_db_dbuser": "true", 
+            "ranger_solr_collection_name": "ranger_audits", 
+            "ranger_admin_password": "P1!q9xa96SMi5NCl", 
+            "ranger_usersync_log_dir": "/var/log/ranger/usersync"
+        }, 
+        "usersync-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
+        }, 
+        "ranger-hdfs-plugin-properties": {
+            "hadoop.rpc.protection": "authentication", 
+            "ranger-hdfs-plugin-enabled": "No", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "policy_user": "ambari-qa", 
+            "common.name.for.certificate": "", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop"
+        }, 
+        "kerberos-env": {
+            "kdc_hosts": "c6401.ambari.apache.org", 
+            "manage_auth_to_local": "true", 
+            "install_packages": "true", 
+            "realm": "EXAMPLE.COM", 
+            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5", 
+            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}", 
+            "kdc_create_attributes": "", 
+            "admin_server_host": "c6401.ambari.apache.org", 
+            "group": "ambari-managed-principals", 
+            "password_length": "20", 
+            "ldap_url": "", 
+            "manage_identities": "true", 
+            "password_min_lowercase_letters": "1", 
+            "create_ambari_principal": "true", 
+            "service_check_principal_name": "${cluster_name|toLower()}-${short_date}", 
+            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin", 
+            "password_chat_timeout": "5", 
+            "kdc_type": "mit-kdc", 
+            "set_password_expiry": "false", 
+            "password_min_punctuation": "1", 
+            "container_dn": "", 
+            "case_insensitive_username_rules": "false", 
+            "password_min_whitespace": "0", 
+            "password_min_uppercase_letters": "1", 
+            "password_min_digits": "1"
+        }, 
+        "admin-properties": {
+            "db_user": "rangeradmin01", 
+            "DB_FLAVOR": "MYSQL", 
+            "db_password": "rangeradmin01", 
+            "db_root_user": "root", 
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
+            "db_name": "ranger01", 
+            "db_host": "c6401.ambari.apache.org", 
+            "db_root_password": "vagrant", 
+            "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
+        }, 
+        "ranger-ugsync-site": {
+            "ranger.usersync.ldap.binddn": "", 
+            "ranger.usersync.policymgr.username": "rangerusersync", 
+            "ranger.usersync.policymanager.mockrun": "false", 
+            "ranger.usersync.group.searchbase": "", 
+            "ranger.usersync.ldap.bindalias": "testldapalias", 
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
+            "ranger.usersync.port": "5151", 
+            "ranger.usersync.pagedresultssize": "500", 
+            "ranger.usersync.group.memberattributename": "", 
+            "ranger.usersync.kerberos.principal": "rangerusersync/_HOST@EXAMPLE.COM", 
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
+            "ranger.usersync.ldap.referral": "ignore", 
+            "ranger.usersync.group.searchfilter": "", 
+            "ranger.usersync.ldap.user.objectclass": "person", 
+            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
+            "ranger.usersync.ldap.user.searchfilter": "", 
+            "ranger.usersync.ldap.groupname.caseconversion": "none", 
+            "ranger.usersync.ldap.ldapbindpassword": "", 
+            "ranger.usersync.unix.minUserId": "500", 
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
+            "ranger.usersync.group.nameattribute": "", 
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
+            "ranger.usersync.user.searchenabled": "false", 
+            "ranger.usersync.group.usermapsyncenabled": "true", 
+            "ranger.usersync.ldap.bindkeystore": "", 
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
+            "ranger.usersync.kerberos.keytab": "/etc/security/keytabs/rangerusersync.service.keytab", 
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
+            "ranger.usersync.group.objectclass": "", 
+            "ranger.usersync.ldap.user.searchscope": "sub", 
+            "ranger.usersync.unix.password.file": "/etc/passwd", 
+            "ranger.usersync.ldap.user.nameattribute": "", 
+            "ranger.usersync.pagedresultsenabled": "true", 
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
+            "ranger.usersync.group.search.first.enabled": "false", 
+            "ranger.usersync.group.searchenabled": "false", 
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
+            "ranger.usersync.ssl": "true", 
+            "ranger.usersync.ldap.url": "", 
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
+            "ranger.usersync.ldap.user.searchbase": "", 
+            "ranger.usersync.ldap.username.caseconversion": "none", 
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
+            "ranger.usersync.keystore.password": "UnIx529p", 
+            "ranger.usersync.unix.group.file": "/etc/group", 
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
+            "ranger.usersync.group.searchscope": "", 
+            "ranger.usersync.truststore.password": "changeit", 
+            "ranger.usersync.enabled": "true", 
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.filesource.text.delimiter": ","
+        }, 
+        "hdfs-site": {
+            "dfs.namenode.checkpoint.period": "21600", 
+            "dfs.namenode.avoid.write.stale.datanode": "true", 
+            "dfs.permissions.superusergroup": "hdfs", 
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
+            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM", 
+            "dfs.heartbeat.interval": "3", 
+            "dfs.content-summary.limit": "5000", 
+            "dfs.support.append": "true", 
+            "dfs.datanode.address": "0.0.0.0:1019", 
+            "dfs.cluster.administrators": " hdfs", 
+            "dfs.namenode.audit.log.async": "true", 
+            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
+            "dfs.namenode.safemode.threshold-pct": "1", 
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
+            "dfs.permissions.enabled": "true", 
+            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
+            "dfs.client.read.shortcircuit": "true", 
+            "dfs.https.port": "50470", 
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
+            "dfs.blocksize": "134217728", 
+            "dfs.blockreport.initialDelay": "120", 
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
+            "dfs.namenode.fslock.fair": "false", 
+            "dfs.datanode.max.transfer.threads": "4096", 
+            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "dfs.replication": "3", 
+            "dfs.namenode.handler.count": "50", 
+            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "fs.permissions.umask-mode": "022", 
+            "dfs.namenode.stale.datanode.interval": "30000", 
+            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
+            "dfs.datanode.failed.volumes.tolerated": "0", 
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
+            "dfs.webhdfs.enabled": "true", 
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
+            "dfs.namenode.accesstime.precision": "0", 
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
+            "dfs.datanode.https.address": "0.0.0.0:50475", 
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
+            "nfs.exports.allowed.hosts": "* rw", 
+            "dfs.namenode.checkpoint.txns": "1000000", 
+            "dfs.datanode.http.address": "0.0.0.0:1022", 
+            "dfs.datanode.du.reserved": "33011188224", 
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
+            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
+            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "dfs.http.policy": "HTTP_ONLY", 
+            "dfs.block.access.token.enable": "true", 
+            "dfs.client.retry.policy.enabled": "false", 
+            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
+            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", 
+            "dfs.namenode.name.dir.restore": "true", 
+            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
+            "dfs.journalnode.https-address": "0.0.0.0:8481", 
+            "dfs.journalnode.http-address": "0.0.0.0:8480", 
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
+            "dfs.namenode.avoid.read.stale.datanode": "true", 
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
+            "dfs.datanode.data.dir.perm": "750", 
+            "dfs.encryption.key.provider.uri": "kms://http@c6401.ambari.apache.org:9292/kms", 
+            "dfs.replication.max": "50", 
+            "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
+        }, 
+        "ranger-tagsync-site": {
+            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks", 
+            "ranger.tagsync.source.atlasrest.username": "", 
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync", 
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
+            "ranger.tagsync.source.file.check.interval.millis": "", 
+            "ranger.tagsync.source.atlasrest.endpoint": "", 
+            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
+            "ranger.tagsync.kerberos.principal": "rangertagsync/_HOST@EXAMPLE.COM", 
+            "ranger.tagsync.kerberos.keytab": "/etc/security/keytabs/rangertagsync.service.keytab", 
+            "ranger.tagsync.source.atlas": "false", 
+            "ranger.tagsync.source.atlasrest": "false", 
+            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.source.file.filename": ""
+        }, 
+        "zoo.cfg": {
+            "clientPort": "2181", 
+            "autopurge.purgeInterval": "24", 
+            "syncLimit": "5", 
+            "dataDir": "/grid/0/hadoop/zookeeper", 
+            "initLimit": "10", 
+            "tickTime": "2000", 
+            "autopurge.snapRetainCount": "30"
+        }, 
+        "hadoop-policy": {
+            "security.job.client.protocol.acl": "*", 
+            "security.job.task.protocol.acl": "*", 
+            "security.datanode.protocol.acl": "*", 
+            "security.namenode.protocol.acl": "*", 
+            "security.client.datanode.protocol.acl": "*", 
+            "security.inter.tracker.protocol.acl": "*", 
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
+            "security.client.protocol.acl": "*", 
+            "security.refresh.policy.protocol.acl": "hadoop", 
+            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.inter.datanode.protocol.acl": "*"
+        }, 
+        "hdfs-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
+        }, 
+        "krb5-conf": {
+            "domains": "", 
+            "manage_krb5_conf": "true", 
+            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  default_ccache_name = /tmp/krb5cc_%{uid}\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\
 n{# Append additional realm declarations below #}", 
+            "conf_dir": "/etc"
+        }, 
+        "core-site": {
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
+            "hadoop.proxyuser.hdfs.groups": "*", 
+            "fs.trash.interval": "360", 
+            "ipc.server.tcpnodelay": "true", 
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec", 
+            "ipc.client.idlethreshold": "8000", 
+            "io.file.buffer.size": "131072", 
+            "hadoop.proxyuser.ambari-server-test_cluster01.groups": "*", 
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
+            "hadoop.security.authentication": "kerberos", 
+            "mapreduce.jobtracker.webinterface.trusted": "false", 
+            "hadoop.proxyuser.hdfs.hosts": "*", 
+            "hadoop.proxyuser.HTTP.groups": "users", 
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
+            "hadoop.security.key.provider.path": "kms://http@c6401.ambari.apache.org:9292/kms", 
+            "hadoop.security.authorization": "true", 
+            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
+            "ipc.client.connect.max.retries": "50", 
+            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa-test_cluster01@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs-test_cluster01@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rangeradmin@EXAMPLE.COM)s/.*/ranger/\nRULE:[2:$1@$0](rangertagsync@EXAMPLE.COM)s/.*/rangertagsync/\nRULE:[2:$1@$0](rangerusersync@EXAMPLE.COM)s/.*/rangerusersync/\nDEFAULT", 
+            "hadoop.proxyuser.ambari-server-test_cluster01.hosts": "c6401.ambari.apache.org", 
+            "ipc.client.connection.maxidletime": "30000"
+        }, 
+        "hadoop-env": {
+            "keyserver_port": "", 
+            "proxyuser_group": "users", 
+            "hdfs_user_nproc_limit": "65536", 
+            "hdfs_log_dir_prefix": "/var/log/hadoop", 
+            "hdfs_user_nofile_limit": "128000", 
+            "hdfs_user": "hdfs", 
+            "hdfs_principal_name": "hdfs-test_cluster01@EXAMPLE.COM", 
+            "keyserver_host": " ", 
+            "namenode_opt_maxnewsize": "128m", 
+            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", 
+            "namenode_opt_maxpermsize": "256m", 
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}", 
+            "namenode_heapsize": "1024m", 
+            "namenode_opt_newsize": "128m", 
+            "nfsgateway_heapsize": "1024", 
+            "dtnode_heapsize": "1024m", 
+            "hadoop_root_logger": "INFO,RFA", 
+            "hadoop_heapsize": "1024", 
+            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
+            "namenode_opt_permsize": "128m", 
+            "hdfs_tmp_dir": "/tmp"
+        }, 
+        "zookeeper-log4j": {
+            "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
+        }, 
+        "ssl-server": {
+            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks", 
+            "ssl.server.keystore.keypassword": "bigdata", 
+            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks", 
+            "ssl.server.keystore.password": "bigdata", 
+            "ssl.server.truststore.password": "bigdata", 
+            "ssl.server.truststore.type": "jks", 
+            "ssl.server.keystore.type": "jks", 
+            "ssl.server.truststore.reload.interval": "10000"
+        }, 
+        "ranger-site": {}, 
+        "admin-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = warn,xa_log_appender\n\n\n# xa_logger\nlog4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.xa_log_app
 ender.file=${logdir}/xa_portal.log\nlog4j.appender.xa_log_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.xa_log_appender.append=true\nlog4j.appender.xa_log_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.xa_log_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n# xa_log_appender : category and additivity\nlog4j.category.org.springframework=warn,xa_log_appender\nlog4j.additivity.org.springframework=false\n\nlog4j.category.org.apache.ranger=info,xa_log_appender\nlog4j.additivity.org.apache.ranger=false\n\nlog4j.category.xa=info,xa_log_appender\nlog4j.additivity.xa=false\n\n# perf_logger\nlog4j.appender.perf_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.perf_appender.file=${logdir}/ranger_admin_perf.log\nlog4j.appender.perf_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.perf_appender.append=true\nlog4j.appender.perf_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.perf_appender.layout.ConversionPattern=%d [%t] 
 %m%n\n\n\n# sql_appender\nlog4j.appender.sql_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.sql_appender.file=${logdir}/xa_portal_sql.log\nlog4j.appender.sql_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.sql_appender.append=true\nlog4j.appender.sql_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.sql_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n\n# sql_appender : category and additivity\nlog4j.category.org.hibernate.SQL=warn,sql_appender\nlog4j.additivity.org.hibernate.SQL=false\n\nlog4j.category.jdbc.sqlonly=fatal,sql_appender\nlog4j.additivity.jdbc.sqlonly=false\n\nlog4j.category.jdbc.sqltiming=warn,sql_appender\nlog4j.additivity.jdbc.sqltiming=false\n\nlog4j.category.jdbc.audit=fatal,sql_appender\nlog4j.additivity.jdbc.audit=false\n\nlog4j.category.jdbc.resultset=fatal,sql_appender\nlog4j.additivity.jdbc.resultset=false\n\nlog4j.category.jdbc.connection=fatal,sql_appender\nlog4j.additivity.jdbc.connection=false"
+        }, 
+        "tagsync-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync
 .log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
+        }, 
+        "ranger-hdfs-security": {
+            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", 
+            "ranger.plugin.hdfs.service.name": "{{repo_name}}", 
+            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", 
+            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000", 
+            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}", 
+            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml", 
+            "xasecure.add-hadoop-authorization": "true"
+        }, 
+        "usersync-properties": {}, 
+        "zookeeper-env": {
+            "zk_server_heapsize": "1024m", 
+            "zookeeper_keytab_path": "/etc/security/keytabs/zk.service.keytab", 
+            "zk_user": "zookeeper", 
+            "zk_log_dir": "/var/log/zookeeper", 
+            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
+            "zk_pid_dir": "/var/run/zookeeper", 
+            "zookeeper_principal_name": "zookeeper/_HOST@EXAMPLE.COM"
+        },
+        "infra-solr-env": {
+            "infra_solr_datadir": "/opt/logsearch_solr/data",
+            "infra_solr_keystore_location": "/etc/security/serverKeys/logsearch.keyStore.jks",
+            "infra_solr_kerberos_name_rules": "DEFAULT",
+            "infra_solr_user": "infra-solr",
+            "infra_solr_maxmem": "1024",
+            "content": "#!/bin/bash\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# By default the script will use JAVA_HOME to determine which java\n# to use, but you can set a specific path for Solr to use without\n# affecting other Java applica
 tions on your server/workstation.\nSOLR_JAVA_HOME={{java64_home}}\n\n# Increase Java Min/Max Heap as needed to support your indexing / query needs\nSOLR_JAVA_MEM=\"-Xms{{logsearch_solr_min_mem}}m -Xmx{{logsearch_solr_max_mem}}m\"\n\n# Enable verbose GC logging\nGC_LOG_OPTS=\"-verbose:gc -XX:+PrintHeapAtGC -XX:+PrintGCDetails \\\n-XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+PrintTenuringDistribution -XX:+PrintGCApplicationStoppedTime\"\n\n# These GC settings have shown to work well for a number of common Solr workloads\nGC_TUNE=\"-XX:NewRatio=3 \\\n-XX:SurvivorRatio=4 \\\n-XX:TargetSurvivorRatio=90 \\\n-XX:MaxTenuringThreshold=8 \\\n-XX:+UseConcMarkSweepGC \\\n-XX:+UseParNewGC \\\n-XX:ConcGCThreads=4 -XX:ParallelGCThreads=4 \\\n-XX:+CMSScavengeBeforeRemark \\\n-XX:PretenureSizeThreshold=64m \\\n-XX:+UseCMSInitiatingOccupancyOnly \\\n-XX:CMSInitiatingOccupancyFraction=50 \\\n-XX:CMSMaxAbortablePrecleanTime=6000 \\\n-XX:+CMSParallelRemarkEnabled \\\n-XX:+ParallelRefProcEnabled\"\
 n\n# Set the ZooKeeper connection string if using an external ZooKeeper ensemble\n# e.g. host1:2181,host2:2181/chroot\n# Leave empty if not using SolrCloud\nZK_HOST=\"{{zookeeper_quorum}}{{logsearch_solr_znode}}\"\n\n# Set the ZooKeeper client timeout (for SolrCloud mode)\nZK_CLIENT_TIMEOUT=\"60000\"\n\n# By default the start script uses \"localhost\"; override the hostname here\n# for production SolrCloud environments to control the hostname exposed to cluster state\n#SOLR_HOST=\"192.168.1.1\"\n\n# By default the start script uses UTC; override the timezone if needed\n#SOLR_TIMEZONE=\"UTC\"\n\n# Set to true to activate the JMX RMI connector to allow remote JMX client applications\n# to monitor the JVM hosting Solr; set to \"false\" to disable that behavior\n# (false is recommended in production environments)\nENABLE_REMOTE_JMX_OPTS=\"true\"\n\n# The script will use SOLR_PORT+10000 for the RMI_PORT or you can set it here\nRMI_PORT={{logsearch_solr_jmx_port}}\n\n# Anything you add to
  the SOLR_OPTS variable will be included in the java\n# start command line as-is, in ADDITION to other options. If you specify the\n# -a option on start script, those options will be appended as well. Examples:\n#SOLR_OPTS=\"$SOLR_OPTS -Dsolr.autoSoftCommit.maxTime=3000\"\n#SOLR_OPTS=\"$SOLR_OPTS -Dsolr.autoCommit.maxTime=60000\"\n#SOLR_OPTS=\"$SOLR_OPTS -Dsolr.clustering.enabled=true\"\n\n# Location where the bin/solr script will save PID files for running instances\n# If not set, the script will create PID files in $SOLR_TIP/bin\nSOLR_PID_DIR={{logsearch_solr_piddir}}\n\n# Path to a directory where Solr creates index files, the specified directory\n# must contain a solr.xml; by default, Solr will use server/solr\nSOLR_HOME={{logsearch_solr_datadir}}\n\n# Solr provides a default Log4J configuration properties file in server/resources\n# however, you may want to customize the log settings and file appender location\n# so you can point the script to use a different log4j.properties f
 ile\nLOG4J_PROPS={{logsearch_solr_conf}}/log4j.properties\n\n# Location where Solr should write logs to; should agree with the file appender\n# settings in server/resources/log4j.properties\nSOLR_LOGS_DIR={{logsearch_solr_log_dir}}\n\n# Sets the port Solr binds to, default is 8983\nSOLR_PORT={{logsearch_solr_port}}\n\n# Be sure to update the paths to the correct keystore for your environment\n{% if logsearch_solr_ssl_enabled %}\nSOLR_SSL_KEY_STORE={{logsearch_solr_keystore_location}}\nSOLR_SSL_KEY_STORE_PASSWORD={{logsearch_solr_keystore_password}}\nSOLR_SSL_TRUST_STORE={{logsearch_solr_keystore_location}}\nSOLR_SSL_TRUST_STORE_PASSWORD={{logsearch_solr_keystore_password}}\nSOLR_SSL_NEED_CLIENT_AUTH=false\nSOLR_SSL_WANT_CLIENT_AUTH=false\n{% endif %}\n\n# Uncomment to set a specific SSL port (-Djetty.ssl.port=N); if not set\n# and you are using SSL, then the start script will use SOLR_PORT for the SSL port\n#SOLR_SSL_PORT=\n\n{% if security_enabled -%}\nSOLR_HOST=`hostname -f`\nSOLR
 _JAAS_FILE={{logsearch_solr_jaas_file}}\nSOLR_KERB_KEYTAB={{logsearch_solr_web_kerberos_keytab}}\nSOLR_KERB_PRINCIPAL={{logsearch_solr_web_kerberos_principal}}\nSOLR_KERB_NAME_RULES={{logsearch_solr_kerberos_name_rules}}\n\nSOLR_AUTHENTICATION_CLIENT_CONFIGURER=\"org.apache.solr.client.solrj.impl.Krb5HttpClientConfigurer\"\nSOLR_AUTHENTICATION_OPTS=\" -DauthenticationPlugin=org.apache.solr.security.KerberosPlugin -Djava.security.auth.login.config=$SOLR_JAAS_FILE -Dsolr.kerberos.principal=${SOLR_KERB_PRINCIPAL} -Dsolr.kerberos.keytab=${SOLR_KERB_KEYTAB} -Dsolr.kerberos.cookie.domain=${SOLR_HOST} -Dsolr.kerberos.name.rules=${SOLR_KERB_NAME_RULES}\"\n{% endif %}", 
+            "infra_solr_pid_dir": "/var/run/ambari-infra-solr",
+            "infra_solr_truststore_password": "bigdata",
+            "infra_solr_truststore_type": "jks",
+            "infra_solr_keystore_type": "jks",
+            "infra_solr_log_dir": "/var/log/ambari-infra-solr",
+            "infra_solr_web_kerberos_keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "infra_solr_ssl_enabled": "false",
+            "infra_solr_client_log_dir": "/var/log/ambari-infra-solr-client",
+            "infra_solr_web_kerberos_principal": "HTTP/_HOST@EXAMPLE.COM",
+            "infra_solr_znode": "/ambari-solr",
+            "infra_solr_keystore_password": "bigdata",
+            "infra_solr_port": "8886",
+            "infra_solr_kerberos_principal": "infra-solr/_HOST@EXAMPLE.COM",
+            "infra_solr_jmx_port": "18886",
+            "infra_solr_truststore_location": "/etc/security/serverKeys/infra-solr.trustStore.jks",
+            "infra_solr_minmem": "512",
+            "infra_solr_kerberos_keytab": "/etc/security/keytabs/infra-solr.service.keytab"
+        },
+        "infra-solr-client-log4j": {
+            "infra_solr_client_log_dir" : "/var/log/ambari-infra-solr-client",
+            "content": "# Copyright 2011 The Apache Software Foundation\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nlog4j.rootLogger=INFO,file,stdout,stderr\n\nlog4j.appender.file=org.apache.log4j.RollingFileAppender\nlog4j.appender
 .file.File={{logsearch_solr_client_log}}\nlog4j.appender.file.MaxFileSize=80MB\nlog4j.appender.file.MaxBackupIndex=60\nlog4j.appender.file.layout=org.apache.log4j.PatternLayout\nlog4j.appender.file.layout.ConversionPattern=%d{DATE} %5p [%t] %c{1}:%L - %m%n\n\nlog4j.appender.stdout=org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.Threshold=INFO\nlog4j.appender.stdout.Target=System.out\nlog4j.appender.stdout.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stdout.layout.ConversionPattern=%m%n\n\nlog4j.appender.stderr=org.apache.log4j.ConsoleAppender\nlog4j.appender.stderr.Threshold=ERROR\nlog4j.appender.stderr.Target=System.err\nlog4j.appender.stderr.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stderr.layout.ConversionPattern=%m%n"
+        },
+        "cluster-env": {
+            "security_enabled": "true", 
+            "override_uid": "true", 
+            "fetch_nonlocal_groups": "true", 
+            "one_dir_per_partition": "true", 
+            "repo_ubuntu_template": "{{package_type}} {{base_url}} {{components}}", 
+            "ignore_groupsusers_create": "false", 
+            "alerts_repeat_tolerance": "1", 
+            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", 
+            "kerberos_domain": "EXAMPLE.COM", 
+            "manage_dirs_on_root": "true",
+            "recovery_lifetime_max_count": "1024", 
+            "recovery_type": "AUTO_START", 
+            "ignore_bad_mounts": "false", 
+            "recovery_window_in_minutes": "60", 
+            "user_group": "hadoop", 
+            "stack_tools": "{\n  \"stack_selector\": [\"hdp-select\", \"/usr/bin/hdp-select\", \"hdp-select\"],\n  \"conf_selector\": [\"conf-select\", \"/usr/bin/conf-select\", \"conf-select\"]\n}", 
+            "recovery_retry_interval": "5", 
+            "stack_features": "{\n  \"stack_features\": [\n    {\n      \"name\": \"snappy\",\n      \"description\": \"Snappy compressor/decompressor support\",\n      \"min_version\": \"2.0.0.0\",\n      \"max_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"lzo\",\n      \"description\": \"LZO libraries support\",\n      \"min_version\": \"2.2.1.0\"\n    },\n    {\n      \"name\": \"express_upgrade\",\n      \"description\": \"Express upgrade support\",\n      \"min_version\": \"2.1.0.0\"\n    },\n    {\n      \"name\": \"rolling_upgrade\",\n      \"description\": \"Rolling upgrade support\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"config_versioning\",\n      \"description\": \"Configurable versions support\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"datanode_non_root\",\n      \"description\": \"DataNode running as non-root support (AMBARI-7615)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\
 ": \"remove_ranger_hdfs_plugin_env\",\n      \"description\": \"HDFS removes Ranger env files (AMBARI-14299)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"ranger\",\n      \"description\": \"Ranger Service support\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"ranger_tagsync_component\",\n      \"description\": \"Ranger Tagsync component support (AMBARI-14383)\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"phoenix\",\n      \"description\": \"Phoenix Service support\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"nfs\",\n      \"description\": \"NFS support\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"tez_for_spark\",\n      \"description\": \"Tez dependency for Spark\",\n      \"min_version\": \"2.2.0.0\",\n      \"max_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"timeline_state_store\",\n      \"description\": \"Yarn application timeline-se
 rvice supports state store property (AMBARI-11442)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"copy_tarball_to_hdfs\",\n      \"description\": \"Copy tarball to HDFS support (AMBARI-12113)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"spark_16plus\",\n      \"description\": \"Spark 1.6+\",\n      \"min_version\": \"2.4.0.0\"\n    },\n    {\n      \"name\": \"spark_thriftserver\",\n      \"description\": \"Spark Thrift Server\",\n      \"min_version\": \"2.3.2.0\"\n    },\n    {\n      \"name\": \"storm_kerberos\",\n      \"description\": \"Storm Kerberos support (AMBARI-7570)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"storm_ams\",\n      \"description\": \"Storm AMS integration (AMBARI-10710)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"create_kafka_broker_id\",\n      \"description\": \"Ambari should create Kafka Broker Id (AMBARI-12678)\",\n      \"min_version\": \"2.2
 .0.0\",\n      \"max_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"kafka_listeners\",\n      \"description\": \"Kafka listeners (AMBARI-10984)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"kafka_kerberos\",\n      \"description\": \"Kafka Kerberos support (AMBARI-10984)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"pig_on_tez\",\n      \"description\": \"Pig on Tez support (AMBARI-7863)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"ranger_usersync_non_root\",\n      \"description\": \"Ranger Usersync as non-root user (AMBARI-10416)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"ranger_audit_db_support\",\n      \"description\": \"Ranger Audit to DB support\",\n      \"min_version\": \"2.2.0.0\",\n      \"max_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"accumulo_kerberos_user_auth\",\n      \"description\": \"Accumulo Kerberos User Auth (AMBARI-10163)\",\n
       \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"knox_versioned_data_dir\",\n      \"description\": \"Use versioned data dir for Knox (AMBARI-13164)\",\n      \"min_version\": \"2.3.2.0\"\n    },\n    {\n      \"name\": \"knox_sso_topology\",\n      \"description\": \"Knox SSO Topology support (AMBARI-13975)\",\n      \"min_version\": \"2.3.8.0\"\n    },\n    {\n      \"name\": \"atlas_rolling_upgrade\",\n      \"description\": \"Rolling upgrade support for Atlas\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"oozie_admin_user\",\n      \"description\": \"Oozie install user as an Oozie admin user (AMBARI-7976)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"oozie_create_hive_tez_configs\",\n      \"description\": \"Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"oozie_setup_shared_lib\",\n      \"description\": \"Oozie 
 setup tools used to shared Oozie lib to HDFS (AMBARI-7240)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"oozie_host_kerberos\",\n      \"description\": \"Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)\",\n      \"min_version\": \"2.0.0.0\",\n      \"max_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"falcon_extensions\",\n      \"description\": \"Falcon Extension\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"hive_metastore_upgrade_schema\",\n      \"description\": \"Hive metastore upgrade schema support (AMBARI-11176)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"hive_server_interactive\",\n      \"description\": \"Hive server interactive support (AMBARI-15573)\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"hive_webhcat_specific_configs\",\n      \"description\": \"Hive webhcat specific configurations support (AMBARI-12364)\",\n      \"min_ver
 sion\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"hive_purge_table\",\n      \"description\": \"Hive purge table support (AMBARI-12260)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"hive_server2_kerberized_env\",\n      \"description\": \"Hive server2 working on kerberized environment (AMBARI-13749)\",\n      \"min_version\": \"2.2.3.0\",\n      \"max_version\": \"2.2.5.0\"\n    },\n    {\n      \"name\": \"hive_env_heapsize\",\n      \"description\": \"Hive heapsize property defined in hive-env (AMBARI-12801)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"ranger_kms_hsm_support\",\n      \"description\": \"Ranger KMS HSM support (AMBARI-15752)\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_log4j_support\",\n      \"description\": \"Ranger supporting log-4j properties (AMBARI-15681)\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_kerberos_support\",\n      \"desc
 ription\": \"Ranger Kerberos support\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"hive_metastore_site_support\",\n      \"description\": \"Hive Metastore site support\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_usersync_password_jceks\",\n      \"description\": \"Saving Ranger Usersync credentials in jceks\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_install_infra_client\",\n      \"description\": \"LogSearch Service support\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"hbase_home_directory\",\n      \"description\": \"Hbase home directory in HDFS needed for HBASE backup\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"spark_livy\",\n      \"description\": \"Livy as slave component of spark\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"atlas_ranger_plugin_support\",\n      \"description\": \"Atlas Ranger plug
 in support\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_pid_support\",\n      \"description\": \"Ranger Service support pid generation AMBARI-16756\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_kms_pid_support\",\n      \"description\": \"Ranger KMS Service support pid generation\",\n      \"min_version\": \"2.5.0.0\"\n    }\n  ]\n}",
+            "recovery_enabled": "true", 
+            "smokeuser_principal_name": "ambari-qa-test_cluster01@EXAMPLE.COM", 
+            "recovery_max_count": "6", 
+            "stack_root": "/usr/hdp", 
+            "repo_suse_rhel_template": "[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0", 
+            "ambari_principal_name": "ambari-server-test_cluster01@EXAMPLE.COM", 
+            "managed_hdfs_resource_property_names": "", 
+            "smokeuser": "ambari-qa"
+        }
+    }
+}
\ No newline at end of file


[13/14] ambari git commit: AMBARI-19036. Add default roles to auto created views. (Ashwin Rajeev via dipayanb)

Posted by jo...@apache.org.
AMBARI-19036. Add default roles to auto created views. (Ashwin Rajeev via dipayanb)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8fbc2718
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8fbc2718
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8fbc2718

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 8fbc2718e65a3771a6af9acc48f1b06d13c32bd4
Parents: b2d2ef1
Author: Dipayan Bhowmick <di...@gmail.com>
Authored: Tue Dec 6 01:01:43 2016 +0530
Committer: Dipayan Bhowmick <di...@gmail.com>
Committed: Tue Dec 6 01:03:08 2016 +0530

----------------------------------------------------------------------
 .../src/main/resources/view.xml                 | 23 +++++++++++---------
 contrib/views/files/src/main/resources/view.xml |  7 ++++++
 .../views/hive-next/src/main/resources/view.xml |  7 ++++++
 contrib/views/tez/src/main/resources/view.xml   |  7 ++++++
 4 files changed, 34 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8fbc2718/contrib/views/capacity-scheduler/src/main/resources/view.xml
----------------------------------------------------------------------
diff --git a/contrib/views/capacity-scheduler/src/main/resources/view.xml b/contrib/views/capacity-scheduler/src/main/resources/view.xml
index 84a9ee0..cba243c 100644
--- a/contrib/views/capacity-scheduler/src/main/resources/view.xml
+++ b/contrib/views/capacity-scheduler/src/main/resources/view.xml
@@ -23,19 +23,22 @@
     <min-ambari-version>2.1.*</min-ambari-version>
     <cluster-config-options>AMBARI-ONLY</cluster-config-options>
 
-  <resource>
-      <name>scheduler</name>
-      <service-class>org.apache.ambari.view.capacityscheduler.CapacitySchedulerService</service-class>
+    <resource>
+        <name>scheduler</name>
+        <service-class>org.apache.ambari.view.capacityscheduler.CapacitySchedulerService</service-class>
     </resource>
 
     <auto-instance>
-      <name>AUTO_CS_INSTANCE</name>
-      <label>YARN Queue Manager</label>
-      <description>Manage YARN Capacity Scheduler Queues</description>
-      <stack-id>HDP-2.*</stack-id>
-      <services>
-        <service>YARN</service>
-      </services>
+        <name>AUTO_CS_INSTANCE</name>
+        <label>YARN Queue Manager</label>
+        <description>Manage YARN Capacity Scheduler Queues</description>
+        <stack-id>HDP-2.*</stack-id>
+        <services>
+            <service>YARN</service>
+        </services>
+        <roles>
+            <role>CLUSTER.ADMINISTRATOR</role>
+        </roles>
     </auto-instance>
 
 </view>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8fbc2718/contrib/views/files/src/main/resources/view.xml
----------------------------------------------------------------------
diff --git a/contrib/views/files/src/main/resources/view.xml b/contrib/views/files/src/main/resources/view.xml
index d6b2fde..640cee9 100644
--- a/contrib/views/files/src/main/resources/view.xml
+++ b/contrib/views/files/src/main/resources/view.xml
@@ -155,5 +155,12 @@
         <services>
           <service>HDFS</service>
         </services>
+        <roles>
+            <role>CLUSTER.ADMINISTRATOR</role>
+            <role>CLUSTER.OPERATOR</role>
+            <role>SERVICE.ADMINISTRATOR</role>
+            <role>SERVICE.OPERATOR</role>
+            <role>CLUSTER.USER</role>
+        </roles>
     </auto-instance>
 </view>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8fbc2718/contrib/views/hive-next/src/main/resources/view.xml
----------------------------------------------------------------------
diff --git a/contrib/views/hive-next/src/main/resources/view.xml b/contrib/views/hive-next/src/main/resources/view.xml
index 02e632c..1107cd1 100644
--- a/contrib/views/hive-next/src/main/resources/view.xml
+++ b/contrib/views/hive-next/src/main/resources/view.xml
@@ -337,5 +337,12 @@
         <services>
             <service>HIVE</service>
         </services>
+        <roles>
+            <role>CLUSTER.ADMINISTRATOR</role>
+            <role>CLUSTER.OPERATOR</role>
+            <role>SERVICE.ADMINISTRATOR</role>
+            <role>SERVICE.OPERATOR</role>
+            <role>CLUSTER.USER</role>
+        </roles>
     </auto-instance>
 </view>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8fbc2718/contrib/views/tez/src/main/resources/view.xml
----------------------------------------------------------------------
diff --git a/contrib/views/tez/src/main/resources/view.xml b/contrib/views/tez/src/main/resources/view.xml
index 2bb2bb0..41ffdbc 100644
--- a/contrib/views/tez/src/main/resources/view.xml
+++ b/contrib/views/tez/src/main/resources/view.xml
@@ -91,5 +91,12 @@ limitations under the License. Kerberos, LDAP, Custom. Binary/Htt
     <services>
       <service>TEZ</service>
     </services>
+    <roles>
+      <role>CLUSTER.ADMINISTRATOR</role>
+      <role>CLUSTER.OPERATOR</role>
+      <role>SERVICE.ADMINISTRATOR</role>
+      <role>SERVICE.OPERATOR</role>
+      <role>CLUSTER.USER</role>
+    </roles>
   </auto-instance>
 </view>


[02/14] ambari git commit: AMBARI-19055 - Removing Tasks From host_role_command Causes Upgrades To Show As PENDING (jonathanhurley)

Posted by jo...@apache.org.
AMBARI-19055 - Removing Tasks From host_role_command Causes Upgrades To Show As PENDING (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/12a3cd87
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/12a3cd87
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/12a3cd87

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 12a3cd87184167cadf3cd12eb9b3c9d67dd3907a
Parents: 5ec73bc
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Sat Dec 3 08:35:12 2016 -0500
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Sat Dec 3 14:09:44 2016 -0500

----------------------------------------------------------------------
 .../controller/internal/CalculatedStatus.java   | 52 +++++++++++++++-----
 .../internal/RequestResourceProvider.java       | 34 ++++++++-----
 .../internal/StageResourceProvider.java         |  2 +-
 .../ambari/server/topology/TopologyManager.java | 19 ++++++-
 .../internal/CalculatedStatusTest.java          | 31 ++++++++++++
 .../internal/RequestResourceProviderTest.java   | 12 +++--
 6 files changed, 121 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/12a3cd87/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CalculatedStatus.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CalculatedStatus.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CalculatedStatus.java
index 3a86aef..3c415df 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CalculatedStatus.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/CalculatedStatus.java
@@ -53,6 +53,17 @@ public class CalculatedStatus {
    */
   private final double percent;
 
+  /**
+   * A status which represents a COMPLETED state at 100%
+   */
+  public static final CalculatedStatus COMPLETED = new CalculatedStatus(HostRoleStatus.COMPLETED,
+      HostRoleStatus.COMPLETED, 100.0);
+
+  /**
+   * A status which represents a PENDING state at 0%
+   */
+  public static final CalculatedStatus PENDING = new CalculatedStatus(HostRoleStatus.PENDING,
+      HostRoleStatus.PENDING, 0.0);
 
   // ----- Constructors ------------------------------------------------------
 
@@ -79,12 +90,6 @@ public class CalculatedStatus {
     this.percent = percent;
   }
 
-  /**
-   * Static factory method to get Status that represents a Completed state
-   */
-  public static CalculatedStatus getCompletedStatus() {
-    return new CalculatedStatus(HostRoleStatus.COMPLETED, HostRoleStatus.COMPLETED, 100.0);
-  }
 
   // ----- CalculatedStatus --------------------------------------------------
 
@@ -291,14 +296,25 @@ public class CalculatedStatus {
   }
 
   /**
-   * Calculates the overall status of an upgrade.
-   * @param stageDto  the map of stage-to-summary value objects
-   * @param stageIds  the stage ids to consider from the value objects
+   * Calculates the overall status of an upgrade. If there are no tasks, then a
+   * status of {@link HostRoleStatus#COMPLETED} is returned.
+   *
+   * @param stageDto
+   *          the map of stage-to-summary value objects
+   * @param stageIds
+   *          the stage ids to consider from the value objects
    * @return the calculated status
    */
   public static CalculatedStatus statusFromStageSummary(Map<Long, HostRoleCommandStatusSummaryDTO> stageDto,
       Set<Long> stageIds) {
 
+    // if either is empty, then we have no tasks and therefore no status - we
+    // should return COMPLETED. This can happen if someone removes all tasks but
+    // leaves the stages and the request
+    if (stageDto.isEmpty() || stageIds.isEmpty()) {
+      return COMPLETED;
+    }
+
     Collection<HostRoleStatus> stageStatuses = new HashSet<>();
     Collection<HostRoleStatus> stageDisplayStatuses = new HashSet<>();
     Collection<HostRoleStatus> taskStatuses = new ArrayList<>();
@@ -378,19 +394,28 @@ public class CalculatedStatus {
    */
   public static HostRoleStatus calculateSummaryStatusOfStage(Map<HostRoleStatus, Integer> counters,
       int total, boolean skippable) {
+
+    // when there are 0 tasks, return COMPLETED
+    if (total == 0) {
+      return HostRoleStatus.COMPLETED;
+    }
+
     if (counters.get(HostRoleStatus.PENDING) == total) {
       return HostRoleStatus.PENDING;
     }
+
     // By definition, any tasks in a future stage must be held in a PENDING status.
     if (counters.get(HostRoleStatus.HOLDING) > 0 || counters.get(HostRoleStatus.HOLDING_FAILED) > 0 || counters.get(HostRoleStatus.HOLDING_TIMEDOUT) > 0) {
       return counters.get(HostRoleStatus.HOLDING) > 0 ? HostRoleStatus.HOLDING :
       counters.get(HostRoleStatus.HOLDING_FAILED) > 0 ? HostRoleStatus.HOLDING_FAILED :
       HostRoleStatus.HOLDING_TIMEDOUT;
     }
+
     // Because tasks are not skippable, guaranteed to be FAILED
     if (counters.get(HostRoleStatus.FAILED) > 0 && !skippable) {
       return HostRoleStatus.FAILED;
     }
+
     // Because tasks are not skippable, guaranteed to be TIMEDOUT
     if (counters.get(HostRoleStatus.TIMEDOUT) > 0  && !skippable) {
       return HostRoleStatus.TIMEDOUT;
@@ -401,9 +426,11 @@ public class CalculatedStatus {
     if (counters.get(HostRoleStatus.ABORTED) > 0 && numActiveTasks == 0) {
       return HostRoleStatus.ABORTED;
     }
+
     if (counters.get(HostRoleStatus.COMPLETED) == total) {
       return HostRoleStatus.COMPLETED;
     }
+
     return HostRoleStatus.IN_PROGRESS;
   }
 
@@ -415,7 +442,8 @@ public class CalculatedStatus {
    *
    * @return summary request status based on statuses of tasks in different states.
    */
-  private static HostRoleStatus calculateSummaryStatusOfUpgrade(Map<HostRoleStatus, Integer> counters, int total) {
+  protected static HostRoleStatus calculateSummaryStatusOfUpgrade(
+      Map<HostRoleStatus, Integer> counters, int total) {
     return calculateSummaryStatusOfStage(counters, total, false);
   }
 
@@ -428,8 +456,8 @@ public class CalculatedStatus {
    *
    * @return summary request status based on statuses of tasks in different states.
    */
-  private static HostRoleStatus calculateSummaryDisplayStatus(Map<HostRoleStatus, Integer> counters,
-                                                              int total, boolean skippable) {
+  protected static HostRoleStatus calculateSummaryDisplayStatus(
+      Map<HostRoleStatus, Integer> counters, int total, boolean skippable) {
     return counters.get(HostRoleStatus.SKIPPED_FAILED) > 0 ? HostRoleStatus.SKIPPED_FAILED :
            counters.get(HostRoleStatus.FAILED) > 0 ? HostRoleStatus.FAILED:
            calculateSummaryStatusOfStage(counters, total, skippable);
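
For readers skimming the patch, the precedence that calculateSummaryStatusOfStage now enforces can be condensed into a standalone sketch. This is an illustration only, not the Ambari class: the enum, the counts map, and the helper below are hypothetical stand-ins, and the zero-task guard is the behavior this commit adds. Note why the guard matters: with zero tasks, the old first check (PENDING count == total, i.e. 0 == 0) made empty requests report PENDING forever.

import java.util.EnumMap;
import java.util.Map;

public class StatusPrecedenceSketch {
  enum Status { PENDING, QUEUED, IN_PROGRESS, HOLDING, FAILED, TIMEDOUT, ABORTED, COMPLETED }

  // Condensed precedence: zero tasks -> COMPLETED, then all-PENDING, HOLDING,
  // non-skippable FAILED/TIMEDOUT, ABORTED with nothing active, all-COMPLETED,
  // and IN_PROGRESS as the catch-all. The real code also distinguishes
  // HOLDING_FAILED and HOLDING_TIMEDOUT, which this sketch folds into HOLDING.
  static Status summarize(Map<Status, Integer> counts, int total, boolean skippable) {
    if (total == 0) {
      return Status.COMPLETED; // the guard added by this commit
    }
    if (counts.getOrDefault(Status.PENDING, 0) == total) {
      return Status.PENDING;
    }
    if (counts.getOrDefault(Status.HOLDING, 0) > 0) {
      return Status.HOLDING;
    }
    if (!skippable && counts.getOrDefault(Status.FAILED, 0) > 0) {
      return Status.FAILED;
    }
    if (!skippable && counts.getOrDefault(Status.TIMEDOUT, 0) > 0) {
      return Status.TIMEDOUT;
    }
    int active = counts.getOrDefault(Status.QUEUED, 0)
        + counts.getOrDefault(Status.IN_PROGRESS, 0);
    if (counts.getOrDefault(Status.ABORTED, 0) > 0 && active == 0) {
      return Status.ABORTED;
    }
    if (counts.getOrDefault(Status.COMPLETED, 0) == total) {
      return Status.COMPLETED;
    }
    return Status.IN_PROGRESS;
  }

  public static void main(String[] args) {
    // No tasks at all: before this commit the 0 == 0 PENDING check fired;
    // now the explicit guard yields COMPLETED.
    System.out.println(summarize(new EnumMap<>(Status.class), 0, false));
  }
}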

http://git-wip-us.apache.org/repos/asf/ambari/blob/12a3cd87/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
index 8c1bc57..0690ee7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
@@ -69,10 +69,10 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.topology.LogicalRequest;
 import org.apache.ambari.server.topology.TopologyManager;
+import org.apache.commons.lang.StringUtils;
 
 import com.google.common.collect.Sets;
 import com.google.inject.Inject;
-import org.apache.commons.lang.StringUtils;
 
 /**
  * Resource provider for request resources.
@@ -566,13 +566,18 @@ public class RequestResourceProvider extends AbstractControllerResourceProvider
 
     @Override
     public boolean equals(Object o) {
-      if (this == o) return true;
-      if (o == null || getClass() != o.getClass()) return false;
+      if (this == o) {
+        return true;
+      }
+      if (o == null || getClass() != o.getClass()) {
+        return false;
+      }
 
       ServiceComponentTuple that = (ServiceComponentTuple) o;
 
-      if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null)
+      if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) {
         return false;
+      }
       return !(componentName != null ? !componentName.equals(that.componentName) : that.componentName != null);
 
     }
@@ -739,15 +744,20 @@ public class RequestResourceProvider extends AbstractControllerResourceProvider
     // get summaries from TopologyManager for logical requests
     summary.putAll(topologyManager.getStageSummaries(entity.getRequestId()));
 
+    // summary might be empty because a host delete has cleared all
+    // HostRoleCommands, or because hosts haven't registered with the cluster
+    // yet when the cluster is provisioned with a Blueprint
+    final CalculatedStatus status;
     LogicalRequest logicalRequest = topologyManager.getRequest(entity.getRequestId());
-
-    CalculatedStatus status = CalculatedStatus.statusFromStageSummary(summary, summary.keySet());
-    if (summary.isEmpty() && logicalRequest == null) {
-
-      // summary might be empty due to delete host have cleared all HostRoleCommands
-      // or due to hosts haven't registered yet with the cluster when the cluster is provisioned
-      // with a Blueprint
-      status = CalculatedStatus.getCompletedStatus();
+    if (summary.isEmpty() && null != logicalRequest) {
+      // in this case, it appears that there are no tasks but this is a logical
+      // topology request, so it's a matter of hosts simply not registering yet
+      // for tasks to be created
+      status = CalculatedStatus.PENDING;
+    } else {
+      // there are either tasks or this is not a logical request, so do normal
+      // status calculations
+      status = CalculatedStatus.statusFromStageSummary(summary, summary.keySet());
     }
 
     setResourceProperty(resource, REQUEST_STATUS_PROPERTY_ID, status.getStatus().toString(), requestedPropertyIds);
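
The request-level branch above distinguishes Blueprint topology requests from ordinary ones. A hedged, standalone condensation of the decision (illustrative names only, not the provider's API):

public class RequestStatusSketch {
  enum Kind { LOGICAL, NORMAL }

  // An empty stage summary only means PENDING when a logical (Blueprint
  // topology) request exists and its hosts simply haven't registered yet;
  // otherwise the normal calculation runs, which now returns COMPLETED for
  // an empty summary.
  static String requestStatus(boolean summaryEmpty, Kind kind) {
    if (summaryEmpty && kind == Kind.LOGICAL) {
      return "PENDING";   // tasks not created yet, hosts still registering
    }
    if (summaryEmpty) {
      return "COMPLETED"; // e.g. a host delete cleared all HostRoleCommands
    }
    return "calculated from stage summaries";
  }

  public static void main(String[] args) {
    System.out.println(requestStatus(true, Kind.LOGICAL));  // PENDING
    System.out.println(requestStatus(true, Kind.NORMAL));   // COMPLETED
    System.out.println(requestStatus(false, Kind.LOGICAL)); // normal calculation
  }
}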

http://git-wip-us.apache.org/repos/asf/ambari/blob/12a3cd87/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
index 59dd9d9..a778882 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StageResourceProvider.java
@@ -339,7 +339,7 @@ public class StageResourceProvider extends AbstractControllerResourceProvider im
     CalculatedStatus status;
     if (summary.isEmpty()) {
       // Delete host might have cleared all HostRoleCommands
-      status = CalculatedStatus.getCompletedStatus();
+      status = CalculatedStatus.COMPLETED;
     } else {
       status = CalculatedStatus.statusFromStageSummary(summary, Collections.singleton(entity.getStageId()));
     }

http://git-wip-us.apache.org/repos/asf/ambari/blob/12a3cd87/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
index d6a4bdd..d527b2d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
@@ -596,6 +596,14 @@ public class TopologyManager {
     return clusterTopologyMap.get(clusterId);
   }
 
+  /**
+   * Gets a map of components keyed by host which have operations in the
+   * {@link HostRoleStatus#PENDING} state. This could either be because hosts
+   * have not registered or because the operations are actually waiting to be
+   * queued.
+   *
+   * @return a mapping of hosts to their pending components.
+   */
   public Map<String, Collection<String>> getPendingHostComponents() {
     ensureInitialized();
     Map<String, Collection<String>> hostComponentMap = new HashMap<String, Collection<String>>();
@@ -603,7 +611,16 @@ public class TopologyManager {
     for (LogicalRequest logicalRequest : allRequests.values()) {
       Map<Long, HostRoleCommandStatusSummaryDTO> summary = logicalRequest.getStageSummaries();
       final CalculatedStatus status = CalculatedStatus.statusFromStageSummary(summary, summary.keySet());
-      if (status.getStatus().isInProgress()) {
+
+      // either use the calculated status of the stage or the fact that there
+      // are no tasks and the request has no end time to determine if the
+      // request is still in progress
+      boolean logicalRequestInProgress = false;
+      if (status.getStatus().isInProgress() || (summary.isEmpty() && logicalRequest.getEndTime() <= 0)) {
+        logicalRequestInProgress = true;
+      }
+
+      if (logicalRequestInProgress) {
         Map<String, Collection<String>> requestTopology = logicalRequest.getProjectedTopology();
         for (Map.Entry<String, Collection<String>> entry : requestTopology.entrySet()) {
           String host = entry.getKey();
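
The new in-progress test in getPendingHostComponents is small enough to lift out. A standalone sketch under hypothetical names (not TopologyManager itself):

public class PendingRequestSketch {
  // A logical request still counts as in progress if its calculated status is
  // an in-progress state, or if it has no tasks yet (empty stage summary) and
  // was never given an end time - the case of hosts that haven't registered.
  static boolean logicalRequestInProgress(boolean statusInProgress,
                                          boolean summaryEmpty, long endTimeMillis) {
    return statusInProgress || (summaryEmpty && endTimeMillis <= 0);
  }

  public static void main(String[] args) {
    System.out.println(logicalRequestInProgress(false, true, -1));             // true: waiting on hosts
    System.out.println(logicalRequestInProgress(false, true, 1480000000000L)); // false: already ended
  }
}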

http://git-wip-us.apache.org/repos/asf/ambari/blob/12a3cd87/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java
index 6f592cd..a96f395 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/CalculatedStatusTest.java
@@ -657,6 +657,37 @@ public class CalculatedStatusTest {
     assertEquals(HostRoleStatus.IN_PROGRESS, calc.getStatus());
   }
 
+  /**
+   * Tests that when there are no tasks and all counts are 0, the returned
+   * status is {@link HostRoleStatus#COMPLETED}.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testGetCompletedStatusForNoTasks() throws Exception {
+    // no status / no tasks
+    CalculatedStatus status = CalculatedStatus.statusFromTaskEntities(
+        new ArrayList<HostRoleCommandEntity>(), false);
+
+    assertEquals(HostRoleStatus.COMPLETED, status.getStatus());
+
+    // empty summaries
+    status = CalculatedStatus.statusFromStageSummary(
+        new HashMap<Long, HostRoleCommandStatusSummaryDTO>(), new HashSet<Long>());
+
+    assertEquals(HostRoleStatus.COMPLETED, status.getStatus());
+
+    // generate a map of 0's - COMPLETED=0, IN_PROGRESS=0, etc
+    Map<HostRoleStatus, Integer> counts = CalculatedStatus.calculateStatusCounts(new ArrayList<HostRoleStatus>());
+    Map<HostRoleStatus, Integer> displayCounts = CalculatedStatus.calculateStatusCounts(new ArrayList<HostRoleStatus>());
+
+    HostRoleStatus hostRoleStatus = CalculatedStatus.calculateSummaryStatusOfUpgrade(counts, 0);
+    HostRoleStatus hostRoleDisplayStatus = CalculatedStatus.calculateSummaryDisplayStatus(displayCounts, 0, false);
+
+    assertEquals(HostRoleStatus.COMPLETED, hostRoleStatus);
+    assertEquals(HostRoleStatus.COMPLETED, hostRoleDisplayStatus);
+  }
+
   private Collection<HostRoleCommandEntity> getTaskEntities(HostRoleStatus... statuses) {
     Collection<HostRoleCommandEntity> entities = new LinkedList<HostRoleCommandEntity>();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/12a3cd87/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
index 5dfc74d..f7dff11 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
@@ -20,11 +20,10 @@ package org.apache.ambari.server.controller.internal;
 
 
 import static org.apache.ambari.server.controller.internal.HostComponentResourceProvider.HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID;
-import org.apache.ambari.server.topology.Blueprint;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.newCapture;
 import static org.powermock.api.easymock.PowerMock.createMock;
 import static org.powermock.api.easymock.PowerMock.createNiceMock;
@@ -77,6 +76,7 @@ import org.apache.ambari.server.security.authorization.AuthorizationHelperInitia
 import org.apache.ambari.server.security.authorization.RoleAuthorization;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.topology.Blueprint;
 import org.apache.ambari.server.topology.ClusterTopology;
 import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.LogicalRequest;
@@ -1200,7 +1200,7 @@ public class RequestResourceProviderTest {
       }
     }
     Assert.assertNotNull(propertyIdToAssert);
-    Assert.assertEquals("true", (String) propertyValueToAssert);
+    Assert.assertEquals("true", propertyValueToAssert);
   }
 
   @Test
@@ -1624,6 +1624,12 @@ public class RequestResourceProviderTest {
     verify(managementController, actionManager, clusters, requestMock, requestDAO, hrcDAO);
   }
 
+  /**
+   * Tests that topology requests return a different status (PENDING) when there
+   * are no tasks. Normal requests should return COMPLETED.
+   *
+   * @throws Exception
+   */
   @Test
   @PrepareForTest(AmbariServer.class)
   public void testGetLogicalRequestStatusWithNoTasks() throws Exception {


[10/14] ambari git commit: AMBARI-18065. Change Ranger Admin start script to setup db for ranger (magyari_sandor)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f5051dcb/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
new file mode 100644
index 0000000..1d5adff
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.6/configs/ranger-admin-default.json
@@ -0,0 +1,704 @@
+{
+    "localComponents": [
+        "NAMENODE", 
+        "SECONDARY_NAMENODE", 
+        "ZOOKEEPER_SERVER", 
+        "DATANODE", 
+        "HDFS_CLIENT", 
+        "ZOOKEEPER_CLIENT", 
+        "RANGER_USERSYNC", 
+        "RANGER_ADMIN", 
+        "RANGER_TAGSYNC",
+        "LOGSEARCH_SOLR",
+        "LOGSEARCH_SOLR_CLIENT"
+    ], 
+    "configuration_attributes": {
+        "ranger-hdfs-audit": {}, 
+        "ssl-client": {}, 
+        "ranger-admin-site": {}, 
+        "ranger-hdfs-policymgr-ssl": {}, 
+        "tagsync-application-properties": {}, 
+        "ranger-env": {}, 
+        "usersync-log4j": {}, 
+        "admin-properties": {}, 
+        "ranger-ugsync-site": {}, 
+        "hdfs-site": {
+            "final": {
+                "dfs.datanode.data.dir": "true", 
+                "dfs.namenode.http-address": "true", 
+                "dfs.datanode.failed.volumes.tolerated": "true", 
+                "dfs.support.append": "true", 
+                "dfs.namenode.name.dir": "true", 
+                "dfs.webhdfs.enabled": "true"
+            }
+        }, 
+        "ranger-tagsync-site": {}, 
+        "zoo.cfg": {}, 
+        "hadoop-policy": {}, 
+        "hdfs-log4j": {}, 
+        "ranger-hdfs-plugin-properties": {}, 
+        "core-site": {
+            "final": {
+                "fs.defaultFS": "true"
+            }
+        }, 
+        "hadoop-env": {}, 
+        "zookeeper-log4j": {}, 
+        "ssl-server": {}, 
+        "ranger-site": {}, 
+        "admin-log4j": {}, 
+        "tagsync-log4j": {}, 
+        "ranger-hdfs-security": {}, 
+        "usersync-properties": {}, 
+        "zookeeper-env": {},
+        "infra-solr-env": {},
+        "infra-solr-client-log4j": {},
+        "cluster-env": {}
+    }, 
+    "public_hostname": "c6401.ambari.apache.org", 
+    "commandId": "11-0", 
+    "hostname": "c6401.ambari.apache.org", 
+    "kerberosCommandParams": [], 
+    "serviceName": "RANGER", 
+    "role": "RANGER_ADMIN", 
+    "forceRefreshConfigTagsBeforeExecution": [], 
+    "requestId": 11, 
+    "agentConfigParams": {
+        "agent": {
+            "parallel_execution": 0
+        }
+    }, 
+    "clusterName": "c1", 
+    "commandType": "EXECUTION_COMMAND", 
+    "taskId": 31, 
+    "roleParams": {}, 
+    "configurationTags": {
+        "ranger-hdfs-audit": {
+            "tag": "version1466705299922"
+        }, 
+        "ssl-client": {
+            "tag": "version1"
+        }, 
+        "ranger-admin-site": {
+            "tag": "version1466705299949"
+        }, 
+        "ranger-hdfs-policymgr-ssl": {
+            "tag": "version1466705299922"
+        }, 
+        "tagsync-application-properties": {
+            "tag": "version1466705299949"
+        }, 
+        "ranger-env": {
+            "tag": "version1466705299949"
+        }, 
+        "usersync-log4j": {
+            "tag": "version1466705299949"
+        }, 
+        "admin-properties": {
+            "tag": "version1466705299949"
+        }, 
+        "ranger-ugsync-site": {
+            "tag": "version1466705299949"
+        }, 
+        "hdfs-site": {
+            "tag": "version1"
+        }, 
+        "ranger-tagsync-site": {
+            "tag": "version1466705299949"
+        }, 
+        "zoo.cfg": {
+            "tag": "version1"
+        }, 
+        "hadoop-policy": {
+            "tag": "version1"
+        }, 
+        "hdfs-log4j": {
+            "tag": "version1"
+        }, 
+        "ranger-hdfs-plugin-properties": {
+            "tag": "version1466705299922"
+        }, 
+        "core-site": {
+            "tag": "version1"
+        }, 
+        "hadoop-env": {
+            "tag": "version1"
+        }, 
+        "zookeeper-log4j": {
+            "tag": "version1"
+        }, 
+        "ssl-server": {
+            "tag": "version1"
+        }, 
+        "ranger-site": {
+            "tag": "version1466705299949"
+        }, 
+        "admin-log4j": {
+            "tag": "version1466705299949"
+        }, 
+        "tagsync-log4j": {
+            "tag": "version1466705299949"
+        }, 
+        "ranger-hdfs-security": {
+            "tag": "version1466705299922"
+        }, 
+        "usersync-properties": {
+            "tag": "version1466705299949"
+        }, 
+        "zookeeper-env": {
+            "tag": "version1"
+        },
+        "infra-solr-env": {
+            "tag": "version1467098537360"
+        },
+        "infra-solr-client-log4j": {
+            "tag": "version1467096917836"
+        },
+        "cluster-env": {
+            "tag": "version1"
+        }
+    }, 
+    "roleCommand": "START", 
+    "hostLevelParams": {
+        "agent_stack_retry_on_unavailability": "false", 
+        "stack_name": "HDP", 
+        "package_version": "2_5_0_0_*", 
+        "custom_mysql_jdbc_name": "mysql-connector-java.jar",
+        "previous_custom_mysql_jdbc_name": "mysql-connector-java-old.jar",
+        "host_sys_prepped": "false", 
+        "ambari_db_rca_username": "mapred", 
+        "current_version": "2.6.0.0-801",
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar", 
+        "agent_stack_retry_count": "5", 
+        "stack_version": "2.6",
+        "jdk_name": "jdk-8u60-linux-x64.tar.gz", 
+        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "java_home": "/usr/jdk64/jdk1.7.0_45", 
+        "repository_version_id": "1", 
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "not_managed_hdfs_path_list": "[\"/tmp\"]", 
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
+        "java_version": "8", 
+        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.5\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.5.0.0-801\",\"baseSaved\":true},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.21\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.21/repos/centos6\",\"baseSaved\":true}]", 
+        "package_list": "[{\"name\":\"ranger_${stack_version}-admin\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-usersync\",\"condition\":\"\",\"skipUpgrade\":false},{\"name\":\"ranger_${stack_version}-tagsync\",\"condition\":\"should_install_ranger_tagsync\",\"skipUpgrade\":false},{\"name\":\"ambari-logsearch-solr-client\",\"condition\":\"should_install_logsearch_solr_client\",\"skipUpgrade\":false}]", 
+        "db_name": "ambari", 
+        "group_list": "[\"ranger\",\"hadoop\",\"users\"]", 
+        "agentCacheDir": "/var/lib/ambari-agent/cache", 
+        "ambari_db_rca_password": "mapred", 
+        "jce_name": "jce_policy-8.zip", 
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
+        "db_driver_filename": "mysql-connector-java.jar", 
+        "user_list": "[\"zookeeper\",\"ambari-qa\",\"ranger\",\"hdfs\"]", 
+        "clientsToUpdateConfigs": "[\"*\"]"
+    }, 
+    "commandParams": {
+        "service_package_folder": "common-services/RANGER/0.4.0/package", 
+        "script": "scripts/ranger_admin.py", 
+        "hooks_folder": "HDP/2.0.6/hooks", 
+        "version": "2.6.0.0-801",
+        "max_duration_for_retries": "0", 
+        "command_retry_enabled": "false", 
+        "command_timeout": "600", 
+        "script_type": "PYTHON"
+    }, 
+    "forceRefreshConfigTags": [], 
+    "stageId": 0, 
+    "clusterHostInfo": {
+        "snamenode_host": [
+            "c6401.ambari.apache.org"
+        ], 
+        "ambari_server_use_ssl": [
+            "false"
+        ], 
+        "all_ping_ports": [
+            "8670"
+        ], 
+        "ranger_tagsync_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "ranger_usersync_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "all_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "slave_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "namenode_host": [
+            "c6401.ambari.apache.org"
+        ], 
+        "ambari_server_port": [
+            "8080"
+        ], 
+        "ranger_admin_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "all_racks": [
+            "/default-rack"
+        ], 
+        "all_ipv4_ips": [
+            "172.22.83.73"
+        ], 
+        "ambari_server_host": [
+            "c6401.ambari.apache.org"
+        ], 
+        "zookeeper_hosts": [
+            "c6401.ambari.apache.org"
+        ],
+        "infra_solr_hosts": [
+            "c6401.ambari.apache.org"
+        ]
+    }, 
+    "configurations": {
+        "ranger-hdfs-audit": {
+            "xasecure.audit.destination.solr.zookeepers": "NONE", 
+            "xasecure.audit.destination.solr.urls": "", 
+            "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/solr/spool", 
+            "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/hdfs/spool", 
+            "xasecure.audit.destination.hdfs": "true",
+            "xasecure.audit.destination.solr": "false", 
+            "xasecure.audit.provider.summary.enabled": "false",
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit",
+            "xasecure.audit.is.enabled": "true"
+        }, 
+        "ssl-client": {
+            "ssl.client.truststore.reload.interval": "10000", 
+            "ssl.client.keystore.password": "bigdata", 
+            "ssl.client.truststore.type": "jks", 
+            "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks", 
+            "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks", 
+            "ssl.client.truststore.password": "bigdata", 
+            "ssl.client.keystore.type": "jks"
+        }, 
+        "ranger-admin-site": {
+            "ranger.admin.kerberos.cookie.domain": "",
+            "ranger.kms.service.user.hdfs": "hdfs", 
+            "ranger.spnego.kerberos.principal": "", 
+            "ranger.ldap.ad.url": "{{ranger_ug_ldap_url}}", 
+            "ranger.plugins.hive.serviceuser": "hive", 
+            "ranger.lookup.kerberos.keytab": "", 
+            "ranger.plugins.kms.serviceuser": "kms", 
+            "ranger.service.https.attrib.ssl.enabled": "false", 
+            "ranger.sso.browser.useragent": "Mozilla,chrome", 
+            "ranger.jpa.jdbc.url": "jdbc:mysql://c6401.ambari.apache.org:3306/ranger01", 
+            "ranger.plugins.hbase.serviceuser": "hbase", 
+            "ranger.plugins.hdfs.serviceuser": "hdfs", 
+            "ranger.ldap.group.searchbase": "{{ranger_ug_ldap_group_searchbase}}", 
+            "ranger.ldap.user.dnpattern": "uid={0},ou=users,dc=xasecure,dc=net", 
+            "ranger.plugins.knox.serviceuser": "knox", 
+            "ranger.ldap.base.dn": "dc=example,dc=com", 
+            "ranger.sso.publicKey": "", 
+            "ranger.admin.kerberos.cookie.path": "/", 
+            "ranger.service.https.attrib.clientAuth": "want", 
+            "ranger.jpa.jdbc.user": "{{ranger_db_user}}", 
+            "ranger.ldap.ad.user.searchfilter": "(sAMAccountName={0})", 
+            "ranger.ldap.group.roleattribute": "cn", 
+            "ranger.plugins.kafka.serviceuser": "kafka", 
+            "ranger.admin.kerberos.principal": "", 
+            "ranger.ldap.ad.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
+            "ranger.credential.provider.path": "/etc/ranger/admin/rangeradmin.jceks", 
+            "ranger.ldap.referral": "ignore", 
+            "ranger.service.http.port": "6080", 
+            "ranger.ldap.user.searchfilter": "(uid={0})", 
+            "ranger.plugins.atlas.serviceuser": "atlas", 
+            "ranger.truststore.password": "changeit", 
+            "ranger.ldap.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
+            "ranger.audit.solr.password": "NONE", 
+            "ranger.audit.solr.zookeepers": "c6401.ambari.apache.org:2181/ambari-solr",
+            "ranger.lookup.kerberos.principal": "", 
+            "ranger.service.https.port": "6182", 
+            "ranger.plugins.storm.serviceuser": "storm", 
+            "ranger.externalurl": "{{ranger_external_url}}", 
+            "ranger.truststore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
+            "ranger.kms.service.user.hive": "", 
+            "ranger.https.attrib.keystore.file": "/etc/ranger/admin/conf/ranger-admin-keystore.jks", 
+            "ranger.jpa.jdbc.dialect": "{{jdbc_dialect}}", 
+            "ranger.service.host": "{{ranger_host}}", 
+            "ranger.service.https.attrib.keystore.keyalias": "rangeradmin", 
+            "ranger.service.https.attrib.keystore.pass": "xasecure", 
+            "ranger.unixauth.remote.login.enabled": "true", 
+            "ranger.jpa.jdbc.credential.alias": "rangeradmin", 
+            "ranger.ldap.ad.bind.password": "{{ranger_usersync_ldap_ldapbindpassword}}", 
+            "ranger.audit.solr.username": "ranger_solr", 
+            "ranger.sso.enabled": "false", 
+            "ranger.audit.solr.urls": "", 
+            "ranger.ldap.ad.domain": "", 
+            "ranger.plugins.yarn.serviceuser": "yarn", 
+            "ranger.audit.source.type": "solr", 
+            "ranger.ldap.bind.dn": "{{ranger_ug_ldap_bind_dn}}", 
+            "ranger.ldap.url": "{{ranger_ug_ldap_url}}", 
+            "ranger.authentication.method": "UNIX", 
+            "ranger.service.http.enabled": "true", 
+            "ranger.ldap.group.searchfilter": "{{ranger_ug_ldap_group_searchfilter}}", 
+            "ranger.ldap.ad.referral": "ignore", 
+            "ranger.ldap.ad.base.dn": "dc=example,dc=com", 
+            "ranger.jpa.jdbc.password": "_", 
+            "ranger.spnego.kerberos.keytab": "", 
+            "ranger.sso.providerurl": "", 
+            "ranger.unixauth.service.hostname": "{{ugsync_host}}", 
+            "ranger.admin.kerberos.keytab": "", 
+            "ranger.admin.kerberos.token.valid.seconds": "30", 
+            "ranger.jpa.jdbc.driver": "com.mysql.jdbc.Driver", 
+            "ranger.unixauth.service.port": "5151"
+        }, 
+        "ranger-hdfs-policymgr-ssl": {
+            "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks", 
+            "xasecure.policymgr.clientssl.truststore.password": "changeit", 
+            "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks", 
+            "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file{{credential_file}}", 
+            "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword"
+        }, 
+        "tagsync-application-properties": {
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer", 
+            "atlas.kafka.zookeeper.connect": "c6401.ambari.apache.org:2181", 
+            "atlas.kafka.bootstrap.servers": "localhost:6667"
+        }, 
+        "ranger-env": {
+            "ranger_solr_shards": "1", 
+            "ranger_solr_config_set": "ranger_audits", 
+            "ranger_user": "ranger", 
+            "ranger_solr_replication_factor": "1",
+            "xml_configurations_supported": "true", 
+            "ranger-atlas-plugin-enabled": "No", 
+            "ranger-hbase-plugin-enabled": "No", 
+            "ranger-yarn-plugin-enabled": "No", 
+            "bind_anonymous": "false", 
+            "ranger_admin_username": "amb_ranger_admin", 
+            "admin_password": "admin", 
+            "is_solrCloud_enabled": "true", 
+            "ranger-storm-plugin-enabled": "No", 
+            "ranger-hdfs-plugin-enabled": "No", 
+            "ranger_group": "ranger", 
+            "ranger-knox-plugin-enabled": "No", 
+            "ranger_admin_log_dir": "/var/log/ranger/admin", 
+            "ranger-kafka-plugin-enabled": "No", 
+            "ranger_privelege_user_jdbc_url": "jdbc:mysql://c6401.ambari.apache.org:3306", 
+            "ranger-hive-plugin-enabled": "No", 
+            "xasecure.audit.destination.solr": "true", 
+            "ranger_pid_dir": "/var/run/ranger", 
+            "xasecure.audit.destination.hdfs": "true", 
+            "admin_username": "admin", 
+            "xasecure.audit.destination.hdfs.dir": "hdfs://c6401.ambari.apache.org:8020/ranger/audit", 
+            "create_db_dbuser": "true", 
+            "ranger_solr_collection_name": "ranger_audits", 
+            "ranger_admin_password": "P1!q9xa96SMi5NCl", 
+            "ranger_usersync_log_dir": "/var/log/ranger/usersync"
+        }, 
+        "usersync-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/usersync.
 log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %m%n"
+        }, 
+        "admin-properties": {
+            "db_user": "rangeradmin01", 
+            "DB_FLAVOR": "MYSQL", 
+            "db_password": "rangeradmin01", 
+            "db_root_user": "root", 
+            "policymgr_external_url": "http://c6401.ambari.apache.org:6080", 
+            "db_name": "ranger01", 
+            "db_host": "c6401.ambari.apache.org", 
+            "db_root_password": "vagrant", 
+            "SQL_CONNECTOR_JAR": "{{driver_curl_target}}"
+        }, 
+        "ranger-ugsync-site": {
+            "ranger.usersync.ldap.binddn": "", 
+            "ranger.usersync.policymgr.username": "rangerusersync", 
+            "ranger.usersync.policymanager.mockrun": "false", 
+            "ranger.usersync.group.searchbase": "", 
+            "ranger.usersync.ldap.bindalias": "testldapalias", 
+            "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks", 
+            "ranger.usersync.port": "5151", 
+            "ranger.usersync.pagedresultssize": "500", 
+            "ranger.usersync.group.memberattributename": "", 
+            "ranger.usersync.kerberos.principal": "", 
+            "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder", 
+            "ranger.usersync.ldap.referral": "ignore", 
+            "ranger.usersync.group.searchfilter": "", 
+            "ranger.usersync.ldap.user.objectclass": "person", 
+            "ranger.usersync.logdir": "{{usersync_log_dir}}", 
+            "ranger.usersync.ldap.user.searchfilter": "", 
+            "ranger.usersync.ldap.groupname.caseconversion": "none", 
+            "ranger.usersync.ldap.ldapbindpassword": "", 
+            "ranger.usersync.unix.minUserId": "500", 
+            "ranger.usersync.policymanager.maxrecordsperapicall": "1000", 
+            "ranger.usersync.group.nameattribute": "", 
+            "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password", 
+            "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks", 
+            "ranger.usersync.user.searchenabled": "false", 
+            "ranger.usersync.group.usermapsyncenabled": "true", 
+            "ranger.usersync.ldap.bindkeystore": "", 
+            "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof", 
+            "ranger.usersync.kerberos.keytab": "", 
+            "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe", 
+            "ranger.usersync.group.objectclass": "", 
+            "ranger.usersync.ldap.user.searchscope": "sub", 
+            "ranger.usersync.unix.password.file": "/etc/passwd", 
+            "ranger.usersync.ldap.user.nameattribute": "", 
+            "ranger.usersync.pagedresultsenabled": "true", 
+            "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}", 
+            "ranger.usersync.group.search.first.enabled": "false", 
+            "ranger.usersync.group.searchenabled": "false", 
+            "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder", 
+            "ranger.usersync.ssl": "true", 
+            "ranger.usersync.ldap.url": "", 
+            "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org", 
+            "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
+            "ranger.usersync.ldap.user.searchbase": "", 
+            "ranger.usersync.ldap.username.caseconversion": "none", 
+            "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks", 
+            "ranger.usersync.keystore.password": "UnIx529p", 
+            "ranger.usersync.unix.group.file": "/etc/group", 
+            "ranger.usersync.filesource.file": "/tmp/usergroup.txt", 
+            "ranger.usersync.group.searchscope": "", 
+            "ranger.usersync.truststore.password": "changeit", 
+            "ranger.usersync.enabled": "true", 
+            "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000", 
+            "ranger.usersync.filesource.text.delimiter": ","
+        }, 
+        "hdfs-site": {
+            "dfs.namenode.checkpoint.period": "21600", 
+            "dfs.namenode.avoid.write.stale.datanode": "true", 
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
+            "dfs.namenode.checkpoint.txns": "1000000", 
+            "dfs.content-summary.limit": "5000", 
+            "dfs.support.append": "true", 
+            "dfs.datanode.address": "0.0.0.0:50010", 
+            "dfs.cluster.administrators": " hdfs", 
+            "dfs.namenode.audit.log.async": "true", 
+            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
+            "dfs.namenode.safemode.threshold-pct": "1", 
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
+            "dfs.namenode.rpc-address": "c6401.ambari.apache.org:8020", 
+            "dfs.permissions.enabled": "true", 
+            "dfs.client.read.shortcircuit": "true", 
+            "dfs.https.port": "50470", 
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
+            "nfs.file.dump.dir": "/tmp/.hdfs-nfs", 
+            "dfs.blocksize": "134217728", 
+            "dfs.blockreport.initialDelay": "120", 
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
+            "dfs.namenode.fslock.fair": "false", 
+            "dfs.datanode.max.transfer.threads": "4096", 
+            "dfs.heartbeat.interval": "3", 
+            "dfs.replication": "3", 
+            "dfs.namenode.handler.count": "50", 
+            "dfs.namenode.checkpoint.dir": "/grid/0/hadoop/hdfs/namesecondary", 
+            "fs.permissions.umask-mode": "022", 
+            "dfs.namenode.stale.datanode.interval": "30000", 
+            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
+            "dfs.datanode.failed.volumes.tolerated": "0", 
+            "dfs.datanode.data.dir": "/grid/0/hadoop/hdfs/data", 
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
+            "dfs.webhdfs.enabled": "true", 
+            "dfs.encrypt.data.transfer.cipher.suites": "AES/CTR/NoPadding", 
+            "dfs.namenode.accesstime.precision": "0", 
+            "dfs.datanode.https.address": "0.0.0.0:50475", 
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
+            "dfs.namenode.secondary.http-address": "c6401.ambari.apache.org:50090", 
+            "nfs.exports.allowed.hosts": "* rw", 
+            "dfs.datanode.http.address": "0.0.0.0:50075", 
+            "dfs.datanode.du.reserved": "33011188224", 
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
+            "dfs.http.policy": "HTTP_ONLY", 
+            "dfs.block.access.token.enable": "true", 
+            "dfs.client.retry.policy.enabled": "false", 
+            "dfs.namenode.name.dir.restore": "true", 
+            "dfs.permissions.superusergroup": "hdfs", 
+            "dfs.journalnode.https-address": "0.0.0.0:8481", 
+            "dfs.journalnode.http-address": "0.0.0.0:8480", 
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
+            "dfs.namenode.avoid.read.stale.datanode": "true", 
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
+            "dfs.datanode.data.dir.perm": "750", 
+            "dfs.encryption.key.provider.uri": "", 
+            "dfs.replication.max": "50", 
+            "dfs.namenode.name.dir": "/grid/0/hadoop/hdfs/namenode"
+        }, 
+        "ranger-tagsync-site": {
+            "ranger.tagsync.dest.ranger.ssl.config.filename": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks", 
+            "ranger.tagsync.source.atlasrest.username": "", 
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync", 
+            "ranger.tagsync.source.atlasrest.download.interval.millis": "", 
+            "ranger.tagsync.keystore.filename": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks", 
+            "ranger.tagsync.source.file.check.interval.millis": "", 
+            "ranger.tagsync.source.atlasrest.endpoint": "", 
+            "ranger.tagsync.dest.ranger.username": "rangertagsync", 
+            "ranger.tagsync.dest.ranger.endpoint": "{{ranger_external_url}}", 
+            "ranger.tagsync.kerberos.principal": "", 
+            "ranger.tagsync.kerberos.keytab": "", 
+            "ranger.tagsync.source.atlas": "false", 
+            "ranger.tagsync.source.atlasrest": "false", 
+            "ranger.tagsync.source.file": "false", 
+            "ranger.tagsync.source.file.filename": ""
+        }, 
+        "zoo.cfg": {
+            "clientPort": "2181", 
+            "autopurge.purgeInterval": "24", 
+            "syncLimit": "5", 
+            "dataDir": "/grid/0/hadoop/zookeeper", 
+            "initLimit": "10", 
+            "tickTime": "2000", 
+            "autopurge.snapRetainCount": "30"
+        }, 
+        "hadoop-policy": {
+            "security.job.client.protocol.acl": "*", 
+            "security.job.task.protocol.acl": "*", 
+            "security.datanode.protocol.acl": "*", 
+            "security.namenode.protocol.acl": "*", 
+            "security.client.datanode.protocol.acl": "*", 
+            "security.inter.tracker.protocol.acl": "*", 
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
+            "security.client.protocol.acl": "*", 
+            "security.refresh.policy.protocol.acl": "hadoop", 
+            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.inter.datanode.protocol.acl": "*"
+        }, 
+        "hdfs-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred
 .audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging leve
 ls\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateCh
 ange=WARN"
+        }, 
+        "ranger-hdfs-plugin-properties": {
+            "hadoop.rpc.protection": "authentication", 
+            "ranger-hdfs-plugin-enabled": "No", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "policy_user": "ambari-qa", 
+            "common.name.for.certificate": "", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop"
+        }, 
+        "core-site": {
+            "hadoop.proxyuser.root.hosts": "c6401.ambari.apache.org", 
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
+            "fs.trash.interval": "360", 
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
+            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
+            "hadoop.security.authentication": "simple", 
+            "hadoop.proxyuser.root.groups": "*", 
+            "ipc.client.connection.maxidletime": "30000", 
+            "hadoop.security.key.provider.path": "", 
+            "mapreduce.jobtracker.webinterface.trusted": "false", 
+            "hadoop.security.authorization": "false", 
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
+            "ipc.server.tcpnodelay": "true", 
+            "ipc.client.connect.max.retries": "50", 
+            "hadoop.security.auth_to_local": "DEFAULT", 
+            "io.file.buffer.size": "131072", 
+            "hadoop.proxyuser.hdfs.hosts": "*", 
+            "hadoop.proxyuser.hdfs.groups": "*", 
+            "ipc.client.idlethreshold": "8000", 
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec"
+        }, 
+        "hadoop-env": {
+            "keyserver_port": "", 
+            "proxyuser_group": "users", 
+            "hdfs_user_nproc_limit": "65536", 
+            "hdfs_log_dir_prefix": "/var/log/hadoop", 
+            "hdfs_user_nofile_limit": "128000", 
+            "hdfs_user": "hdfs", 
+            "keyserver_host": " ", 
+            "namenode_opt_maxnewsize": "128m", 
+            "namenode_opt_maxpermsize": "256m", 
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms
 {{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{name
 node_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARY
 NAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_D
 N_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The schedul
 ing priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Add database libraries\nJAVA_JDBC_LIBS=\"\"\nif [ -d \"/usr/share/java\" ]; then\n  for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n  do\n    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n  done\nfi\n\n# Add libraries to the hadoop classpath - some may not need a colon as they already include it\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n{% if is_datanode_max_locked_memory_set %}\n# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n# Makes sense to fix only when runing DN as root \nif [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_S
 ECURE_DN_USER\" ]; then\n  ulimit -l {{datanode_max_locked_memory}}\nfi\n{% endif %}", 
+            "namenode_heapsize": "1024m", 
+            "namenode_opt_newsize": "128m", 
+            "nfsgateway_heapsize": "1024", 
+            "dtnode_heapsize": "1024m", 
+            "hadoop_root_logger": "INFO,RFA", 
+            "hadoop_heapsize": "1024", 
+            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
+            "namenode_opt_permsize": "128m", 
+            "hdfs_tmp_dir": "/tmp"
+        }, 
+        "zookeeper-log4j": {
+            "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
+        }, 
+        "ssl-server": {
+            "ssl.server.keystore.location": "/etc/security/serverKeys/keystore.jks", 
+            "ssl.server.keystore.keypassword": "bigdata", 
+            "ssl.server.truststore.location": "/etc/security/serverKeys/all.jks", 
+            "ssl.server.keystore.password": "bigdata", 
+            "ssl.server.truststore.password": "bigdata", 
+            "ssl.server.truststore.type": "jks", 
+            "ssl.server.keystore.type": "jks", 
+            "ssl.server.truststore.reload.interval": "10000"
+        }, 
+        "ranger-site": {}, 
+        "admin-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = warn,xa_log_appender\n\n\n# xa_logger\nlog4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.xa_log_app
 ender.file=${logdir}/xa_portal.log\nlog4j.appender.xa_log_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.xa_log_appender.append=true\nlog4j.appender.xa_log_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.xa_log_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n# xa_log_appender : category and additivity\nlog4j.category.org.springframework=warn,xa_log_appender\nlog4j.additivity.org.springframework=false\n\nlog4j.category.org.apache.ranger=info,xa_log_appender\nlog4j.additivity.org.apache.ranger=false\n\nlog4j.category.xa=info,xa_log_appender\nlog4j.additivity.xa=false\n\n# perf_logger\nlog4j.appender.perf_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.perf_appender.file=${logdir}/ranger_admin_perf.log\nlog4j.appender.perf_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.perf_appender.append=true\nlog4j.appender.perf_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.perf_appender.layout.ConversionPattern=%d [%t] 
 %m%n\n\n\n# sql_appender\nlog4j.appender.sql_appender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.sql_appender.file=${logdir}/xa_portal_sql.log\nlog4j.appender.sql_appender.datePattern='.'yyyy-MM-dd\nlog4j.appender.sql_appender.append=true\nlog4j.appender.sql_appender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.sql_appender.layout.ConversionPattern=%d [%t] %-5p %C{6} (%F:%L) - %m%n\n\n# sql_appender : category and additivity\nlog4j.category.org.hibernate.SQL=warn,sql_appender\nlog4j.additivity.org.hibernate.SQL=false\n\nlog4j.category.jdbc.sqlonly=fatal,sql_appender\nlog4j.additivity.jdbc.sqlonly=false\n\nlog4j.category.jdbc.sqltiming=warn,sql_appender\nlog4j.additivity.jdbc.sqltiming=false\n\nlog4j.category.jdbc.audit=fatal,sql_appender\nlog4j.additivity.jdbc.audit=false\n\nlog4j.category.jdbc.resultset=fatal,sql_appender\nlog4j.additivity.jdbc.resultset=false\n\nlog4j.category.jdbc.connection=fatal,sql_appender\nlog4j.additivity.jdbc.connection=false"
+        }, 
+        "tagsync-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nlog4j.rootLogger = info,logFile\n\n# logFile\nlog4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.logFile.file=${logdir}/tagsync
 .log\nlog4j.appender.logFile.datePattern='.'yyyy-MM-dd\nlog4j.appender.logFile.layout=org.apache.log4j.PatternLayout\nlog4j.appender.logFile.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n\n\n# console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.Target=System.out\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss} %5p %c{1} [%t] - %L %m%n"
+        }, 
+        "ranger-hdfs-security": {
+            "ranger.plugin.hdfs.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient", 
+            "ranger.plugin.hdfs.service.name": "{{repo_name}}", 
+            "ranger.plugin.hdfs.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache", 
+            "ranger.plugin.hdfs.policy.pollIntervalMs": "30000", 
+            "ranger.plugin.hdfs.policy.rest.url": "{{policymgr_mgr_url}}", 
+            "ranger.plugin.hdfs.policy.rest.ssl.config.file": "/etc/hadoop/conf/ranger-policymgr-ssl.xml", 
+            "xasecure.add-hadoop-authorization": "true"
+        }, 
+        "usersync-properties": {}, 
+        "zookeeper-env": {
+            "zk_log_dir": "/var/log/zookeeper", 
+            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
+            "zk_server_heapsize": "1024m", 
+            "zk_pid_dir": "/var/run/zookeeper", 
+            "zk_user": "zookeeper"
+        },
+        "infra-solr-env": {
+            "infra_solr_datadir": "/opt/logsearch_solr/data",
+            "infra_solr_keystore_location": "/etc/security/serverKeys/logsearch.keyStore.jks",
+            "infra_solr_kerberos_name_rules": "DEFAULT",
+            "infra_solr_user": "infra-solr",
+            "infra_solr_maxmem": "1024",
+            "content": "#!/bin/bash\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# By default the script will use JAVA_HOME to determine which java\n# to use, but you can set a specific path for Solr to use without\n# affecting other Java applica
 tions on your server/workstation.\nSOLR_JAVA_HOME={{java64_home}}\n\n# Increase Java Min/Max Heap as needed to support your indexing / query needs\nSOLR_JAVA_MEM=\"-Xms{{logsearch_solr_min_mem}}m -Xmx{{logsearch_solr_max_mem}}m\"\n\n# Enable verbose GC logging\nGC_LOG_OPTS=\"-verbose:gc -XX:+PrintHeapAtGC -XX:+PrintGCDetails \\\n-XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+PrintTenuringDistribution -XX:+PrintGCApplicationStoppedTime\"\n\n# These GC settings have shown to work well for a number of common Solr workloads\nGC_TUNE=\"-XX:NewRatio=3 \\\n-XX:SurvivorRatio=4 \\\n-XX:TargetSurvivorRatio=90 \\\n-XX:MaxTenuringThreshold=8 \\\n-XX:+UseConcMarkSweepGC \\\n-XX:+UseParNewGC \\\n-XX:ConcGCThreads=4 -XX:ParallelGCThreads=4 \\\n-XX:+CMSScavengeBeforeRemark \\\n-XX:PretenureSizeThreshold=64m \\\n-XX:+UseCMSInitiatingOccupancyOnly \\\n-XX:CMSInitiatingOccupancyFraction=50 \\\n-XX:CMSMaxAbortablePrecleanTime=6000 \\\n-XX:+CMSParallelRemarkEnabled \\\n-XX:+ParallelRefProcEnabled\"\
 n\n# Set the ZooKeeper connection string if using an external ZooKeeper ensemble\n# e.g. host1:2181,host2:2181/chroot\n# Leave empty if not using SolrCloud\nZK_HOST=\"{{zookeeper_quorum}}{{logsearch_solr_znode}}\"\n\n# Set the ZooKeeper client timeout (for SolrCloud mode)\nZK_CLIENT_TIMEOUT=\"60000\"\n\n# By default the start script uses \"localhost\"; override the hostname here\n# for production SolrCloud environments to control the hostname exposed to cluster state\n#SOLR_HOST=\"192.168.1.1\"\n\n# By default the start script uses UTC; override the timezone if needed\n#SOLR_TIMEZONE=\"UTC\"\n\n# Set to true to activate the JMX RMI connector to allow remote JMX client applications\n# to monitor the JVM hosting Solr; set to \"false\" to disable that behavior\n# (false is recommended in production environments)\nENABLE_REMOTE_JMX_OPTS=\"true\"\n\n# The script will use SOLR_PORT+10000 for the RMI_PORT or you can set it here\nRMI_PORT={{logsearch_solr_jmx_port}}\n\n# Anything you add to
  the SOLR_OPTS variable will be included in the java\n# start command line as-is, in ADDITION to other options. If you specify the\n# -a option on start script, those options will be appended as well. Examples:\n#SOLR_OPTS=\"$SOLR_OPTS -Dsolr.autoSoftCommit.maxTime=3000\"\n#SOLR_OPTS=\"$SOLR_OPTS -Dsolr.autoCommit.maxTime=60000\"\n#SOLR_OPTS=\"$SOLR_OPTS -Dsolr.clustering.enabled=true\"\n\n# Location where the bin/solr script will save PID files for running instances\n# If not set, the script will create PID files in $SOLR_TIP/bin\nSOLR_PID_DIR={{logsearch_solr_piddir}}\n\n# Path to a directory where Solr creates index files, the specified directory\n# must contain a solr.xml; by default, Solr will use server/solr\nSOLR_HOME={{logsearch_solr_datadir}}\n\n# Solr provides a default Log4J configuration properties file in server/resources\n# however, you may want to customize the log settings and file appender location\n# so you can point the script to use a different log4j.properties f
 ile\nLOG4J_PROPS={{logsearch_solr_conf}}/log4j.properties\n\n# Location where Solr should write logs to; should agree with the file appender\n# settings in server/resources/log4j.properties\nSOLR_LOGS_DIR={{logsearch_solr_log_dir}}\n\n# Sets the port Solr binds to, default is 8983\nSOLR_PORT={{logsearch_solr_port}}\n\n# Be sure to update the paths to the correct keystore for your environment\n{% if logsearch_solr_ssl_enabled %}\nSOLR_SSL_KEY_STORE={{logsearch_solr_keystore_location}}\nSOLR_SSL_KEY_STORE_PASSWORD={{logsearch_solr_keystore_password}}\nSOLR_SSL_TRUST_STORE={{logsearch_solr_keystore_location}}\nSOLR_SSL_TRUST_STORE_PASSWORD={{logsearch_solr_keystore_password}}\nSOLR_SSL_NEED_CLIENT_AUTH=false\nSOLR_SSL_WANT_CLIENT_AUTH=false\n{% endif %}\n\n# Uncomment to set a specific SSL port (-Djetty.ssl.port=N); if not set\n# and you are using SSL, then the start script will use SOLR_PORT for the SSL port\n#SOLR_SSL_PORT=\n\n{% if security_enabled -%}\nSOLR_HOST=`hostname -f`\nSOLR
 _JAAS_FILE={{logsearch_solr_jaas_file}}\nSOLR_KERB_KEYTAB={{logsearch_solr_web_kerberos_keytab}}\nSOLR_KERB_PRINCIPAL={{logsearch_solr_web_kerberos_principal}}\nSOLR_KERB_NAME_RULES={{logsearch_solr_kerberos_name_rules}}\n\nSOLR_AUTHENTICATION_CLIENT_CONFIGURER=\"org.apache.solr.client.solrj.impl.Krb5HttpClientConfigurer\"\nSOLR_AUTHENTICATION_OPTS=\" -DauthenticationPlugin=org.apache.solr.security.KerberosPlugin -Djava.security.auth.login.config=$SOLR_JAAS_FILE -Dsolr.kerberos.principal=${SOLR_KERB_PRINCIPAL} -Dsolr.kerberos.keytab=${SOLR_KERB_KEYTAB} -Dsolr.kerberos.cookie.domain=${SOLR_HOST} -Dsolr.kerberos.name.rules=${SOLR_KERB_NAME_RULES}\"\n{% endif %}", 
+            "infra_solr_pid_dir": "/var/run/ambari-infra-solr",
+            "infra_solr_truststore_password": "bigdata",
+            "infra_solr_truststore_type": "jks",
+            "infra_solr_keystore_type": "jks",
+            "infra_solr_log_dir": "/var/log/ambari-infra-solr",
+            "infra_solr_web_kerberos_keytab": "/etc/security/keytabs/spnego.service.keytab",
+            "infra_solr_ssl_enabled": "false",
+            "infra_solr_client_log_dir": "/var/log/ambari-infra-solr-client",
+            "infra_solr_web_kerberos_principal": "HTTP/_HOST@EXAMPLE.COM",
+            "infra_solr_znode": "/infra-solr",
+            "infra_solr_keystore_password": "bigdata",
+            "infra_solr_port": "8886",
+            "infra_solr_kerberos_principal": "infra-solr/_HOST@EXAMPLE.COM",
+            "infra_solr_jmx_port": "18886",
+            "infra_solr_truststore_location": "/etc/security/serverKeys/logsearch.trustStore.jks",
+            "infra_solr_minmem": "512",
+            "infra_solr_kerberos_keytab": "/etc/security/keytabs/infra-solr.service.keytab"
+        },
+        "infra-solr-client-log4j": {
+            "infra_solr_client_log_dir" : "/var/log/ambari-infra-solr-client",
+            "content": "# Copyright 2011 The Apache Software Foundation\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nlog4j.rootLogger=INFO,file,stdout,stderr\n\nlog4j.appender.file=org.apache.log4j.RollingFileAppender\nlog4j.appender
 .file.File={{logsearch_solr_client_log}}\nlog4j.appender.file.MaxFileSize=80MB\nlog4j.appender.file.MaxBackupIndex=60\nlog4j.appender.file.layout=org.apache.log4j.PatternLayout\nlog4j.appender.file.layout.ConversionPattern=%d{DATE} %5p [%t] %c{1}:%L - %m%n\n\nlog4j.appender.stdout=org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.Threshold=INFO\nlog4j.appender.stdout.Target=System.out\nlog4j.appender.stdout.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stdout.layout.ConversionPattern=%m%n\n\nlog4j.appender.stderr=org.apache.log4j.ConsoleAppender\nlog4j.appender.stderr.Threshold=ERROR\nlog4j.appender.stderr.Target=System.err\nlog4j.appender.stderr.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stderr.layout.ConversionPattern=%m%n"
+        },
+        "cluster-env": {
+            "security_enabled": "false", 
+            "override_uid": "true", 
+            "fetch_nonlocal_groups": "true", 
+            "one_dir_per_partition": "true", 
+            "repo_ubuntu_template": "{{package_type}} {{base_url}} {{components}}", 
+            "ignore_groupsusers_create": "false", 
+            "alerts_repeat_tolerance": "1", 
+            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", 
+            "kerberos_domain": "EXAMPLE.COM", 
+            "manage_dirs_on_root": "true",
+            "recovery_lifetime_max_count": "1024", 
+            "recovery_type": "AUTO_START", 
+            "ignore_bad_mounts": "false", 
+            "recovery_window_in_minutes": "60", 
+            "user_group": "hadoop", 
+            "stack_tools": "{\n  \"stack_selector\": [\"hdp-select\", \"/usr/bin/hdp-select\", \"hdp-select\"],\n  \"conf_selector\": [\"conf-select\", \"/usr/bin/conf-select\", \"conf-select\"]\n}", 
+            "recovery_retry_interval": "5", 
+            "stack_features": "{\n  \"stack_features\": [\n    {\n      \"name\": \"snappy\",\n      \"description\": \"Snappy compressor/decompressor support\",\n      \"min_version\": \"2.0.0.0\",\n      \"max_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"lzo\",\n      \"description\": \"LZO libraries support\",\n      \"min_version\": \"2.2.1.0\"\n    },\n    {\n      \"name\": \"express_upgrade\",\n      \"description\": \"Express upgrade support\",\n      \"min_version\": \"2.1.0.0\"\n    },\n    {\n      \"name\": \"rolling_upgrade\",\n      \"description\": \"Rolling upgrade support\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"config_versioning\",\n      \"description\": \"Configurable versions support\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"datanode_non_root\",\n      \"description\": \"DataNode running as non-root support (AMBARI-7615)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\
 ": \"remove_ranger_hdfs_plugin_env\",\n      \"description\": \"HDFS removes Ranger env files (AMBARI-14299)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"ranger\",\n      \"description\": \"Ranger Service support\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"ranger_tagsync_component\",\n      \"description\": \"Ranger Tagsync component support (AMBARI-14383)\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"phoenix\",\n      \"description\": \"Phoenix Service support\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"nfs\",\n      \"description\": \"NFS support\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"tez_for_spark\",\n      \"description\": \"Tez dependency for Spark\",\n      \"min_version\": \"2.2.0.0\",\n      \"max_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"timeline_state_store\",\n      \"description\": \"Yarn application timeline-se
 rvice supports state store property (AMBARI-11442)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"copy_tarball_to_hdfs\",\n      \"description\": \"Copy tarball to HDFS support (AMBARI-12113)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"spark_16plus\",\n      \"description\": \"Spark 1.6+\",\n      \"min_version\": \"2.4.0.0\"\n    },\n    {\n      \"name\": \"spark_thriftserver\",\n      \"description\": \"Spark Thrift Server\",\n      \"min_version\": \"2.3.2.0\"\n    },\n    {\n      \"name\": \"storm_kerberos\",\n      \"description\": \"Storm Kerberos support (AMBARI-7570)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"storm_ams\",\n      \"description\": \"Storm AMS integration (AMBARI-10710)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"create_kafka_broker_id\",\n      \"description\": \"Ambari should create Kafka Broker Id (AMBARI-12678)\",\n      \"min_version\": \"2.2
 .0.0\",\n      \"max_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"kafka_listeners\",\n      \"description\": \"Kafka listeners (AMBARI-10984)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"kafka_kerberos\",\n      \"description\": \"Kafka Kerberos support (AMBARI-10984)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"pig_on_tez\",\n      \"description\": \"Pig on Tez support (AMBARI-7863)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"ranger_usersync_non_root\",\n      \"description\": \"Ranger Usersync as non-root user (AMBARI-10416)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"ranger_audit_db_support\",\n      \"description\": \"Ranger Audit to DB support\",\n      \"min_version\": \"2.2.0.0\",\n      \"max_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"accumulo_kerberos_user_auth\",\n      \"description\": \"Accumulo Kerberos User Auth (AMBARI-10163)\",\n
       \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"knox_versioned_data_dir\",\n      \"description\": \"Use versioned data dir for Knox (AMBARI-13164)\",\n      \"min_version\": \"2.3.2.0\"\n    },\n    {\n      \"name\": \"knox_sso_topology\",\n      \"description\": \"Knox SSO Topology support (AMBARI-13975)\",\n      \"min_version\": \"2.3.8.0\"\n    },\n    {\n      \"name\": \"atlas_rolling_upgrade\",\n      \"description\": \"Rolling upgrade support for Atlas\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"oozie_admin_user\",\n      \"description\": \"Oozie install user as an Oozie admin user (AMBARI-7976)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"oozie_create_hive_tez_configs\",\n      \"description\": \"Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"oozie_setup_shared_lib\",\n      \"description\": \"Oozie 
 setup tools used to shared Oozie lib to HDFS (AMBARI-7240)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"oozie_host_kerberos\",\n      \"description\": \"Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)\",\n      \"min_version\": \"2.0.0.0\",\n      \"max_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"falcon_extensions\",\n      \"description\": \"Falcon Extension\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"hive_metastore_upgrade_schema\",\n      \"description\": \"Hive metastore upgrade schema support (AMBARI-11176)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"hive_server_interactive\",\n      \"description\": \"Hive server interactive support (AMBARI-15573)\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"hive_webhcat_specific_configs\",\n      \"description\": \"Hive webhcat specific configurations support (AMBARI-12364)\",\n      \"min_ver
 sion\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"hive_purge_table\",\n      \"description\": \"Hive purge table support (AMBARI-12260)\",\n      \"min_version\": \"2.3.0.0\"\n    },\n    {\n      \"name\": \"hive_server2_kerberized_env\",\n      \"description\": \"Hive server2 working on kerberized environment (AMBARI-13749)\",\n      \"min_version\": \"2.2.3.0\",\n      \"max_version\": \"2.2.5.0\"\n    },\n    {\n      \"name\": \"hive_env_heapsize\",\n      \"description\": \"Hive heapsize property defined in hive-env (AMBARI-12801)\",\n      \"min_version\": \"2.2.0.0\"\n    },\n    {\n      \"name\": \"ranger_kms_hsm_support\",\n      \"description\": \"Ranger KMS HSM support (AMBARI-15752)\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_log4j_support\",\n      \"description\": \"Ranger supporting log-4j properties (AMBARI-15681)\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_kerberos_support\",\n      \"desc
 ription\": \"Ranger Kerberos support\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"hive_metastore_site_support\",\n      \"description\": \"Hive Metastore site support\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_usersync_password_jceks\",\n      \"description\": \"Saving Ranger Usersync credentials in jceks\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_install_infra_client\",\n      \"description\": \"LogSearch Service support\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"hbase_home_directory\",\n      \"description\": \"Hbase home directory in HDFS needed for HBASE backup\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"spark_livy\",\n      \"description\": \"Livy as slave component of spark\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"atlas_ranger_plugin_support\",\n      \"description\": \"Atlas Ranger plug
 in support\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_pid_support\",\n      \"description\": \"Ranger Service support pid generation AMBARI-16756\",\n      \"min_version\": \"2.5.0.0\"\n    },\n    {\n      \"name\": \"ranger_kms_pid_support\",\n      \"description\": \"Ranger KMS Service support pid generation\",\n      \"min_version\": \"2.5.0.0\"\n    }\n  ]\n}",
+            "recovery_enabled": "true", 
+            "recovery_max_count": "6", 
+            "stack_root": "/usr/hdp", 
+            "repo_suse_rhel_template": "[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0", 
+            "managed_hdfs_resource_property_names": "", 
+            "smokeuser": "ambari-qa"
+        }
+    }
+}
\ No newline at end of file


[05/14] ambari git commit: AMBARI-19067. Special characters break creating user homes (Laszlo Puskas via magyari_sandor)

Posted by jo...@apache.org.
AMBARI-19067. Special characters break creating user homes (Laszlo Puskas via magyari_sandor)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/cac20e09
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/cac20e09
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/cac20e09

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: cac20e090151785edf2b8176d9ac0bddb1adc86f
Parents: 7f6ca9d
Author: Laszlo Puskas <lp...@hortonworks.com>
Authored: Mon Dec 5 14:07:33 2016 +0100
Committer: Sandor Magyari <sm...@hortonworks.com>
Committed: Mon Dec 5 14:07:33 2016 +0100

----------------------------------------------------------------------
 .../src/main/resources/scripts/post-user-creation-hook.sh     | 7 +++++++
 1 file changed, 7 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/cac20e09/ambari-server/src/main/resources/scripts/post-user-creation-hook.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/scripts/post-user-creation-hook.sh b/ambari-server/src/main/resources/scripts/post-user-creation-hook.sh
index 34169c1..ee8d2d1 100755
--- a/ambari-server/src/main/resources/scripts/post-user-creation-hook.sh
+++ b/ambari-server/src/main/resources/scripts/post-user-creation-hook.sh
@@ -84,6 +84,10 @@ echo "[" | cat > "$JSON_INPUT"
 while read -r LINE
 do
   USR_NAME=$(echo "$LINE" | awk -F, '{print $1}')
+  echo "Processing user name: $USR_NAME"
+
+  # shell-escape the username so special characters cannot break the generated JSON or later commands
+  USR_NAME=$(printf "%q" "$USR_NAME")
 
   cat <<EOF >> "$JSON_INPUT"
     {
@@ -97,7 +101,10 @@ do
 EOF
 done <"$CSV_FILE"
 
+# delete the last line of the generated JSON (superseded by the closing elements below)
 sed -i '$ d' "$JSON_INPUT"
+
+# append the JSON closing elements to the end of the file
 echo $'}\n]' | cat >> "$JSON_INPUT"
 echo "Generating file $JSON_INPUT ... DONE."
 echo "Processing post user creation hook payload ... DONE."


[11/14] ambari git commit: AMBARI-18065. Change Ranger Admin start script to setup db for ranger (magyari_sandor)

Posted by jo...@apache.org.
AMBARI-18065. Change Ranger Admin start script to setup db for ranger (magyari_sandor)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f5051dcb
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f5051dcb
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f5051dcb

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: f5051dcb7f17f0e26fed9e70eef35a3758a35e60
Parents: 431d265
Author: Sandor Magyari <sm...@hortonworks.com>
Authored: Thu Dec 1 19:36:01 2016 +0100
Committer: Sandor Magyari <sm...@hortonworks.com>
Committed: Mon Dec 5 16:39:13 2016 +0100

----------------------------------------------------------------------
 .../libraries/functions/constants.py            |   1 +
 .../RANGER/0.4.0/package/scripts/params.py      |   1 +
 .../0.4.0/package/scripts/ranger_admin.py       |  36 +-
 .../HDP/2.0.6/properties/stack_features.json    |   5 +
 .../stacks/2.6/RANGER/test_ranger_admin.py      | 504 ++++++++++++
 .../2.6/configs/ranger-admin-default.json       | 704 +++++++++++++++++
 .../2.6/configs/ranger-admin-secured.json       | 773 +++++++++++++++++++
 7 files changed, 2010 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f5051dcb/ambari-common/src/main/python/resource_management/libraries/functions/constants.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/constants.py b/ambari-common/src/main/python/resource_management/libraries/functions/constants.py
index 3431495..46562e0 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/constants.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/constants.py
@@ -97,6 +97,7 @@ class StackFeature:
   RANGER_PID_SUPPORT = "ranger_pid_support"
   RANGER_KMS_PID_SUPPORT = "ranger_kms_pid_support"
   RANGER_ADMIN_PASSWD_CHANGE = "ranger_admin_password_change"
+  RANGER_SETUP_DB_ON_START = "ranger_setup_db_on_start"
   STORM_METRICS_APACHE_CLASSES = "storm_metrics_apache_classes"
   SPARK_JAVA_OPTS_SUPPORT = "spark_java_opts_support"
   ATLAS_HBASE_SETUP = "atlas_hbase_setup"

http://git-wip-us.apache.org/repos/asf/ambari/blob/f5051dcb/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
index e27b363..f2cc940 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
@@ -70,6 +70,7 @@ stack_supports_usersync_passwd = check_stack_feature(StackFeature.RANGER_USERSYN
 stack_supports_infra_client = check_stack_feature(StackFeature.RANGER_INSTALL_INFRA_CLIENT, version_for_stack_feature_checks)
 stack_supports_pid = check_stack_feature(StackFeature.RANGER_PID_SUPPORT, version_for_stack_feature_checks)
 stack_supports_ranger_admin_password_change = check_stack_feature(StackFeature.RANGER_ADMIN_PASSWD_CHANGE, version_for_stack_feature_checks)
+stack_supports_ranger_setup_db_on_start = check_stack_feature(StackFeature.RANGER_SETUP_DB_ON_START, version_for_stack_feature_checks)
 
 downgrade_from_version = default("/commandParams/downgrade_from_version", None)
 upgrade_direction = default("/commandParams/upgrade_direction", None)
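
For context, default() above pulls an optional value out of the command JSON, returning the fallback when the path is missing. A minimal sketch of that behavior — assuming '/'-separated key paths into the command dictionary; this is an illustration, not Ambari's implementation:

def default(path, fallback, config=None):
    # Walk a '/'-separated path through nested dicts, falling back when
    # any key along the way is absent.
    node = config or {}
    for key in path.strip("/").split("/"):
        if not isinstance(node, dict) or key not in node:
            return fallback
        node = node[key]
    return node

cmd = {"commandParams": {"upgrade_direction": "UPGRADE"}}
print(default("/commandParams/upgrade_direction", None, cmd))      # UPGRADE
print(default("/commandParams/downgrade_from_version", None, cmd)) # None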

http://git-wip-us.apache.org/repos/asf/ambari/blob/f5051dcb/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
index a9656c7..b849d58 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
@@ -45,18 +45,9 @@ class RangerAdmin(Script):
     self.install_packages(env)
     import params
     env.set_params(params)
-    if params.xml_configurations_supported:
-      from setup_ranger_xml import setup_ranger_db
-      setup_ranger_db()
-
-    self.configure(env)
-
-    if params.xml_configurations_supported:
-      from setup_ranger_xml import setup_java_patch
-      setup_java_patch()
-
-    if params.stack_supports_ranger_admin_password_change:
-      setup_ranger_admin_passwd_change()
+    # configure and set up the db at install time only on stacks without setup-db-on-start (HDP < 2.6)
+    if not params.stack_supports_ranger_setup_db_on_start:
+      self.configure(env, setup_db=True)
 
   def stop(self, env, upgrade_type=None):
     import params
@@ -93,7 +84,9 @@ class RangerAdmin(Script):
   def start(self, env, upgrade_type=None):
     import params
     env.set_params(params)
-    self.configure(env, upgrade_type=upgrade_type)
+
+    # set up the db on start only when the stack supports it (HDP >= 2.6)
+    self.configure(env, upgrade_type=upgrade_type, setup_db=params.stack_supports_ranger_setup_db_on_start)
 
     if params.stack_supports_infra_client and params.audit_solr_enabled and params.is_solrCloud_enabled:
       solr_cloud_util.setup_solr_client(params.config, custom_log4j = params.custom_log4j)
@@ -122,7 +115,7 @@ class RangerAdmin(Script):
         raise ComponentIsNotRunning()
     pass
 
-  def configure(self, env, upgrade_type=None):
+  def configure(self, env, upgrade_type=None, setup_db=False):
     import params
     env.set_params(params)
     if params.xml_configurations_supported:
@@ -130,8 +123,23 @@ class RangerAdmin(Script):
     else:
       from setup_ranger import ranger
 
+    # set up db if we are not upgrading and setup_db is true
+    if setup_db and upgrade_type is None:
+      if params.xml_configurations_supported:
+        from setup_ranger_xml import setup_ranger_db
+        setup_ranger_db()
+
     ranger('ranger_admin', upgrade_type=upgrade_type)
 
+    # set up java patches if we are not upgrading and setup_db is true
+    if setup_db and upgrade_type is None:
+      if params.xml_configurations_supported:
+        from setup_ranger_xml import setup_java_patch
+        setup_java_patch()
+
+      if params.stack_supports_ranger_admin_password_change:
+        setup_ranger_admin_passwd_change()
+
   def set_ru_rangeradmin_in_progress(self, upgrade_marker_file):
     config_dir = os.path.dirname(upgrade_marker_file)
     try:
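
Taken together, the patch moves db setup and java patching out of install() and into configure(), gated by the new stack feature. A condensed sketch of the resulting control flow (a simplification under assumed names, not the real classes):

class RangerAdminSketch:
    def __init__(self, supports_setup_db_on_start):
        self.supports_setup_db_on_start = supports_setup_db_on_start

    def install(self):
        # pre-2.6 stacks: one-time db setup at install time
        if not self.supports_setup_db_on_start:
            self.configure(setup_db=True)

    def start(self, upgrade_type=None):
        # 2.6+ stacks: db setup is re-run (idempotently) on every start
        self.configure(upgrade_type=upgrade_type,
                       setup_db=self.supports_setup_db_on_start)

    def configure(self, upgrade_type=None, setup_db=False):
        if setup_db and upgrade_type is None:
            print("setting up ranger db and java patches")
        print("writing ranger_admin configuration")

RangerAdminSketch(supports_setup_db_on_start=True).start()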

http://git-wip-us.apache.org/repos/asf/ambari/blob/f5051dcb/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
index dd87b72..27a755c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
@@ -289,6 +289,11 @@
       "min_version": "2.5.0.0"
     },
     {
+      "name": "ranger_setup_db_on_start",
+      "description": "Allows setup of ranger db and java patches to be called multiple times on each START",
+      "min_version": "2.6.0.0"
+    },
+    {
       "name": "storm_metrics_apache_classes",
       "description": "Metrics sink for Storm that uses Apache class names",
       "min_version": "2.5.0.0"

http://git-wip-us.apache.org/repos/asf/ambari/blob/f5051dcb/ambari-server/src/test/python/stacks/2.6/RANGER/test_ranger_admin.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/RANGER/test_ranger_admin.py b/ambari-server/src/test/python/stacks/2.6/RANGER/test_ranger_admin.py
new file mode 100644
index 0000000..a3ce186
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.6/RANGER/test_ranger_admin.py
@@ -0,0 +1,504 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import json
+from mock.mock import MagicMock, patch
+from stacks.utils.RMFTestCase import *
+from only_for_platform import not_for_platform, PLATFORM_WINDOWS
+
+@not_for_platform(PLATFORM_WINDOWS)
+class TestRangerAdmin(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "RANGER/0.4.0/package"
+  STACK_VERSION = "2.6"
+
+  @patch("os.path.isfile")    
+  def test_start_default(self, isfile_mock):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ranger_admin.py",
+      classname = "RangerAdmin",
+      command = "start",
+      config_file="ranger-admin-default.json",
+      stack_version = self.STACK_VERSION,
+      target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    self.assert_configure_default()
+
+    self.assertResourceCalled('Directory', '/var/log/ambari-infra-solr-client',
+        create_parents = True,
+        mode = 0755,
+        cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/usr/lib/ambari-infra-solr-client',
+        cd_access = 'a',
+        create_parents = True,
+        mode = 0755,
+        recursive_ownership = True,
+    )
+    self.assertResourceCalled('File', '/usr/lib/ambari-infra-solr-client/solrCloudCli.sh',
+        content = StaticFile('/usr/lib/ambari-infra-solr-client/solrCloudCli.sh'),
+        mode = 0755,
+    )
+    self.assertResourceCalled('File', '/usr/lib/ambari-infra-solr-client/log4j.properties',
+        content = InlineTemplate(self.getConfig()['configurations']['infra-solr-client-log4j']['content']),
+        mode = 0644,
+    )
+    self.assertResourceCalled('File', '/var/log/ambari-infra-solr-client/solr-client.log',
+        content = '',
+        mode = 0664,
+    )
+    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --check-znode --retry 5 --interval 10')
+    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --download-config --config-dir /tmp/solr_config_ranger_audits_0.[0-9]* --config-set ranger_audits --retry 30 --interval 5')
+    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --upload-config --config-dir /usr/hdp/current/ranger-admin/contrib/solr_for_audit_setup/conf --config-set ranger_audits --retry 30 --interval 5')
+    self.assertResourceCalledRegexp('^Directory$', '^/tmp/solr_config_ranger_audits_0.[0-9]*',
+                                    action=['delete'],
+                                    create_parents=True)
+    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --create-collection --collection ranger_audits --config-set ranger_audits --shards 1 --replication 1 --max-shards 1 --retry 5 --interval 10')
+
+    self.assertResourceCalled('Execute', '/usr/bin/ranger-admin-start',
+      environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
+      not_if = 'ps -ef | grep proc_rangeradmin | grep -v grep',
+      user = 'ranger',
+    )
+
+    self.assertTrue(isfile_mock.called)
+    self.assertNoMoreResources()
+
+  @patch("os.path.isfile")
+  def test_start_secured(self, isfile_mock):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ranger_admin.py",
+      classname = "RangerAdmin",
+      command = "start",
+      config_file="ranger-admin-secured.json",
+      stack_version = self.STACK_VERSION,
+      target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    self.assert_configure_secured()
+
+    self.assertResourceCalled('Directory', '/var/log/ambari-infra-solr-client',
+        create_parents = True,
+        mode = 0755,
+        cd_access = 'a',
+    )
+    self.assertResourceCalled('Directory', '/usr/lib/ambari-infra-solr-client',
+        cd_access = 'a',
+        create_parents = True,
+        mode = 0755,
+        recursive_ownership = True,
+    )
+    self.assertResourceCalled('File', '/usr/lib/ambari-infra-solr-client/solrCloudCli.sh',
+        content = StaticFile('/usr/lib/ambari-infra-solr-client/solrCloudCli.sh'),
+        mode = 0755,
+    )
+    self.assertResourceCalled('File', '/usr/lib/ambari-infra-solr-client/log4j.properties',
+        content = InlineTemplate(self.getConfig()['configurations']['infra-solr-client-log4j']['content']),
+        mode = 0644,
+    )
+    self.assertResourceCalled('File', '/var/log/ambari-infra-solr-client/solr-client.log',
+        content = '',
+        mode = 0664,
+    )
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/conf/ranger_solr_jaas.conf',
+      content = Template('ranger_solr_jaas_conf.j2'),
+      owner = 'ranger',
+    )
+    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /ambari-solr --check-znode --retry 5 --interval 10')
+    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/ambari-solr --download-config --config-dir /tmp/solr_config_ranger_audits_0.[0-9]* --config-set ranger_audits --retry 30 --interval 5')
+    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/ambari-solr --upload-config --config-dir /usr/hdp/current/ranger-admin/contrib/solr_for_audit_setup/conf --config-set ranger_audits --retry 30 --interval 5')
+    self.assertResourceCalledRegexp('^Directory$', '^/tmp/solr_config_ranger_audits_0.[0-9]*',
+                                    action=['delete'],
+                                    create_parents=True)
+    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/ambari-solr --create-collection --collection ranger_audits --config-set ranger_audits --shards 1 --replication 1 --max-shards 1 --retry 5 --interval 10')
+
+    self.assertResourceCalled('Execute', '/usr/bin/ranger-admin-start',
+      environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
+      not_if = 'ps -ef | grep proc_rangeradmin | grep -v grep',
+      user = 'ranger',
+    )
+
+    self.assertTrue(isfile_mock.called)
+    self.assertNoMoreResources()
+
+
+  def assert_setup_db(self):
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/ews/lib/mysql-connector-java-old.jar',
+                              action = ['delete'],
+                              )
+
+    self.assertResourceCalled('File', '/tmp/mysql-connector-java.jar',
+                              content = DownloadSource('http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar'),
+                              mode = 0644
+                              )
+
+    self.assertResourceCalled('Execute', ('cp', '--remove-destination', '/tmp/mysql-connector-java.jar',
+                                          '/usr/hdp/2.6.0.0-801/ranger-admin/ews/lib'),
+                              sudo = True,
+                              path = ['/bin', '/usr/bin/']
+                              )
+
+    self.assertResourceCalled('File', '/usr/hdp/2.6.0.0-801/ranger-admin/ews/lib/mysql-connector-java.jar',
+                              mode = 0644
+                              )
+
+    self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/2.6.0.0-801/ranger-admin/install.properties',
+                              properties = self.getConfig()['configurations']['admin-properties'],
+                              owner = 'ranger'
+                              )
+
+    self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/2.6.0.0-801/ranger-admin/install.properties',
+                              owner = 'ranger',
+                              properties = {'SQL_CONNECTOR_JAR':
+                                              '/usr/hdp/2.6.0.0-801/ranger-admin/ews/lib/mysql-connector-java.jar'}
+                              )
+    self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/current/ranger-admin/install.properties',
+                              owner = 'ranger',
+                              properties = {'audit_store': 'solr'}
+                              )
+
+    self.assertResourceCalled('Execute', ('ambari-python-wrap /usr/hdp/current/ranger-admin/dba_script.py -q'),
+                              user = 'ranger',
+                              environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45',
+                                             'RANGER_ADMIN_HOME': u'/usr/hdp/current/ranger-admin'},
+                              logoutput = True
+                              )
+
+    self.assertResourceCalled('Execute', ('ambari-python-wrap /usr/hdp/current/ranger-admin/db_setup.py'),
+                              user = 'ranger',
+                              environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45',
+                                             'RANGER_ADMIN_HOME': u'/usr/hdp/current/ranger-admin'},
+                              logoutput = True
+                              )
+
+  def assert_configure_default(self):
+
+    ### assert db setup
+    self.assert_setup_db()
+
+    self.assertResourceCalled('Directory', '/usr/hdp/current/ranger-admin/conf',
+      owner = 'ranger',
+      group = 'ranger',
+      create_parents = True
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/ews/lib/mysql-connector-java-old.jar',
+        action = ['delete'],
+    )
+
+    self.assertResourceCalled('File', '/tmp/mysql-connector-java.jar',
+      content = DownloadSource('http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar'),
+      mode = 0644
+    )
+
+    self.assertResourceCalled('Execute', ('cp', '--remove-destination', '/tmp/mysql-connector-java.jar', '/usr/hdp/current/ranger-admin/ews/lib'),
+      sudo = True,
+      path = ['/bin', '/usr/bin/']
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/ews/lib/mysql-connector-java.jar',
+      mode = 0644
+    )
+
+    self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/current/ranger-admin/install.properties',
+      properties = self.getConfig()['configurations']['admin-properties'],
+      owner = 'ranger'
+    )
+
+    self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/current/ranger-admin/install.properties',
+      owner = 'ranger',
+      properties = {'SQL_CONNECTOR_JAR': '/usr/hdp/current/ranger-admin/ews/lib/mysql-connector-java.jar'}
+    )
+
+    self.assertResourceCalled('File', '/usr/lib/ambari-agent/DBConnectionVerification.jar',
+      content = DownloadSource('http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar'),
+      mode = 0644
+    )
+
+    self.assertResourceCalled('Execute',
+      '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/hdp/current/ranger-admin/ews/lib/mysql-connector-java.jar:/usr/hdp/current/ranger-admin/ews/lib/* org.apache.ambari.server.DBConnectionVerification \'jdbc:mysql://c6401.ambari.apache.org:3306/ranger01\' rangeradmin01 rangeradmin01 com.mysql.jdbc.Driver',
+      path=['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+      tries=5,
+      try_sleep=10,
+      environment = {}
+    )
+
+    self.assertResourceCalled('Execute', ('ln', '-sf', '/usr/hdp/current/ranger-admin/ews/webapp/WEB-INF/classes/conf', '/usr/hdp/current/ranger-admin/conf'),
+      not_if = 'ls /usr/hdp/current/ranger-admin/conf',
+      only_if = 'ls /usr/hdp/current/ranger-admin/ews/webapp/WEB-INF/classes/conf',
+      sudo = True
+    )
+
+    self.assertResourceCalled('Directory', '/usr/hdp/current/ranger-admin/',
+      owner='ranger',
+      group='ranger',
+      recursive_ownership = True
+    )
+
+    self.assertResourceCalled('Directory', '/var/run/ranger',
+      mode=0755,
+      owner = 'ranger',
+      group = 'hadoop',
+      cd_access = "a",
+      create_parents=True
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/conf/ranger-admin-env-piddir.sh',
+      content = 'export RANGER_PID_DIR_PATH=/var/run/ranger\nexport RANGER_USER=ranger',
+      owner = 'ranger',
+      group = 'ranger',
+      mode = 0755
+    )
+
+    self.assertResourceCalled('Directory', '/var/log/ranger/admin',
+      owner='ranger',
+      group='ranger',
+      create_parents = True,
+      cd_access = 'a',
+      mode = 0755
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/conf/ranger-admin-env-logdir.sh',
+      content = 'export RANGER_ADMIN_LOG_DIR=/var/log/ranger/admin',
+      owner = 'ranger',
+      group = 'ranger',
+      mode = 0755
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/conf/ranger-admin-default-site.xml',
+      owner = 'ranger',
+      group = 'ranger'
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/conf/security-applicationContext.xml',
+      owner = 'ranger',
+      group = 'ranger'
+    )
+
+    self.assertResourceCalled('Execute', ('ln', '-sf', '/usr/hdp/current/ranger-admin/ews/ranger-admin-services.sh', '/usr/bin/ranger-admin'),
+      not_if = 'ls /usr/bin/ranger-admin',
+      only_if = 'ls /usr/hdp/current/ranger-admin/ews/ranger-admin-services.sh',
+      sudo = True
+    )
+
+    self.assertResourceCalled('XmlConfig', 'ranger-admin-site.xml',
+      owner = 'ranger',
+      group = 'ranger',
+      conf_dir = '/usr/hdp/current/ranger-admin/conf',
+      configurations = self.getConfig()['configurations']['ranger-admin-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['ranger-admin-site'],
+      mode = 0644
+    )
+
+    self.assertResourceCalled('Directory', '/usr/hdp/current/ranger-admin/conf/ranger_jaas',
+      owner ='ranger',
+      group ='ranger',
+      mode = 0700
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/ews/webapp/WEB-INF/log4j.properties',
+      owner = 'ranger',
+      group = 'ranger',
+      content = self.getConfig()['configurations']['admin-log4j']['content'],
+      mode = 0644
+    )
+
+    self.assertResourceCalled('Execute', ('/usr/jdk64/jdk1.7.0_45/bin/java', '-cp', '/usr/hdp/current/ranger-admin/cred/lib/*', 'org.apache.ranger.credentialapi.buildks', 'create', 'rangeradmin', '-value', 'rangeradmin01', '-provider', 'jceks://file/etc/ranger/admin/rangeradmin.jceks'),
+      environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
+      logoutput=True,
+      sudo = True
+    )
+
+    self.assertResourceCalled('File', '/etc/ranger/admin/rangeradmin.jceks',
+      owner = 'ranger',
+      group = 'ranger',
+      mode = 0640
+    )
+
+    self.assertResourceCalled('XmlConfig', 'core-site.xml',
+      owner = 'ranger',
+      group = 'ranger',
+      conf_dir = '/usr/hdp/current/ranger-admin/conf',
+      configurations = self.getConfig()['configurations']['core-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
+      mode = 0644
+    )
+
+    self.assertResourceCalled('Execute', ('ambari-python-wrap /usr/hdp/current/ranger-admin/db_setup.py -javapatch'),
+                              user = 'ranger',
+                              environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45',
+                                             'RANGER_ADMIN_HOME': u'/usr/hdp/current/ranger-admin'},
+                              logoutput = True
+                              )
+
+  def assert_configure_secured(self):
+
+    ### assert db setup
+    self.assert_setup_db()
+
+    self.assertResourceCalled('Directory', '/usr/hdp/current/ranger-admin/conf',
+      owner = 'ranger',
+      group = 'ranger',
+      create_parents = True
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/ews/lib/mysql-connector-java-old.jar',
+        action = ['delete'],
+    )
+
+    self.assertResourceCalled('File', '/tmp/mysql-connector-java.jar',
+      content = DownloadSource('http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar'),
+      mode = 0644
+    )
+
+    self.assertResourceCalled('Execute', ('cp', '--remove-destination', '/tmp/mysql-connector-java.jar', '/usr/hdp/current/ranger-admin/ews/lib'),
+      sudo = True,
+      path = ['/bin', '/usr/bin/']
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/ews/lib/mysql-connector-java.jar',
+      mode = 0644
+    )
+
+    self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/current/ranger-admin/install.properties',
+      properties = self.getConfig()['configurations']['admin-properties'],
+      owner = 'ranger'
+    )
+
+    self.assertResourceCalled('ModifyPropertiesFile', '/usr/hdp/current/ranger-admin/install.properties',
+      owner = 'ranger',
+      properties = {'SQL_CONNECTOR_JAR': '/usr/hdp/current/ranger-admin/ews/lib/mysql-connector-java.jar'}
+    )
+
+    self.assertResourceCalled('File', '/usr/lib/ambari-agent/DBConnectionVerification.jar',
+      content = DownloadSource('http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar'),
+      mode = 0644
+    )
+
+    self.assertResourceCalled('Execute',
+      '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/hdp/current/ranger-admin/ews/lib/mysql-connector-java.jar:/usr/hdp/current/ranger-admin/ews/lib/* org.apache.ambari.server.DBConnectionVerification \'jdbc:mysql://c6401.ambari.apache.org:3306/ranger01\' rangeradmin01 rangeradmin01 com.mysql.jdbc.Driver',
+      path=['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+      tries=5,
+      try_sleep=10,
+      environment = {}
+    )
+
+    self.assertResourceCalled('Execute', ('ln', '-sf', '/usr/hdp/current/ranger-admin/ews/webapp/WEB-INF/classes/conf', '/usr/hdp/current/ranger-admin/conf'),
+      not_if = 'ls /usr/hdp/current/ranger-admin/conf',
+      only_if = 'ls /usr/hdp/current/ranger-admin/ews/webapp/WEB-INF/classes/conf',
+      sudo = True
+    )
+
+    self.assertResourceCalled('Directory', '/usr/hdp/current/ranger-admin/',
+      owner='ranger',
+      group='ranger',
+      recursive_ownership = True
+    )
+
+    self.assertResourceCalled('Directory', '/var/run/ranger',
+      mode=0755,
+      owner = 'ranger',
+      group = 'hadoop',
+      cd_access = "a",
+      create_parents=True
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/conf/ranger-admin-env-piddir.sh',
+      content = 'export RANGER_PID_DIR_PATH=/var/run/ranger\nexport RANGER_USER=ranger',
+      owner = 'ranger',
+      group = 'ranger',
+      mode = 0755
+    )
+
+    self.assertResourceCalled('Directory', '/var/log/ranger/admin',
+      owner='ranger',
+      group='ranger',
+      create_parents = True,
+      cd_access = 'a',
+      mode = 0755
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/conf/ranger-admin-env-logdir.sh',
+      content = 'export RANGER_ADMIN_LOG_DIR=/var/log/ranger/admin',
+      owner = 'ranger',
+      group = 'ranger',
+      mode = 0755
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/conf/ranger-admin-default-site.xml',
+      owner = 'ranger',
+      group = 'ranger'
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/conf/security-applicationContext.xml',
+      owner = 'ranger',
+      group = 'ranger'
+    )
+
+    self.assertResourceCalled('Execute', ('ln', '-sf', '/usr/hdp/current/ranger-admin/ews/ranger-admin-services.sh', '/usr/bin/ranger-admin'),
+      not_if = 'ls /usr/bin/ranger-admin',
+      only_if = 'ls /usr/hdp/current/ranger-admin/ews/ranger-admin-services.sh',
+      sudo = True
+    )
+
+    self.assertResourceCalled('XmlConfig', 'ranger-admin-site.xml',
+      owner = 'ranger',
+      group = 'ranger',
+      conf_dir = '/usr/hdp/current/ranger-admin/conf',
+      configurations = self.getConfig()['configurations']['ranger-admin-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['ranger-admin-site'],
+      mode = 0644
+    )
+
+    self.assertResourceCalled('Directory', '/usr/hdp/current/ranger-admin/conf/ranger_jaas',
+      owner ='ranger',
+      group ='ranger',
+      mode = 0700
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/ranger-admin/ews/webapp/WEB-INF/log4j.properties',
+      owner = 'ranger',
+      group = 'ranger',
+      content = self.getConfig()['configurations']['admin-log4j']['content'],
+      mode = 0644
+    )
+
+    self.assertResourceCalled('Execute', ('/usr/jdk64/jdk1.7.0_45/bin/java', '-cp', '/usr/hdp/current/ranger-admin/cred/lib/*', 'org.apache.ranger.credentialapi.buildks', 'create', 'rangeradmin', '-value', 'rangeradmin01', '-provider', 'jceks://file/etc/ranger/admin/rangeradmin.jceks'),
+      environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
+      logoutput=True,
+      sudo = True
+    )
+
+    self.assertResourceCalled('File', '/etc/ranger/admin/rangeradmin.jceks',
+      owner = 'ranger',
+      group = 'ranger',
+      mode = 0640
+    )
+
+    self.assertResourceCalled('XmlConfig', 'core-site.xml',
+      owner = 'ranger',
+      group = 'ranger',
+      conf_dir = '/usr/hdp/current/ranger-admin/conf',
+      configurations = self.getConfig()['configurations']['core-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
+      mode = 0644
+    )
+
+    self.assertResourceCalled('Execute', ('ambari-python-wrap /usr/hdp/current/ranger-admin/db_setup.py -javapatch'),
+                              user = 'ranger',
+                              environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45',
+                                             'RANGER_ADMIN_HOME': u'/usr/hdp/current/ranger-admin'},
+                              logoutput = True
+                              )
\ No newline at end of file

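The test above leans on two mechanisms: Python mock patching to stub out OS calls (the @patch("os.path.isfile") decorator) and the RMFTestCase harness's ordered assertResourceCalled / assertResourceCalledRegexp assertions, which replay each Resource the script would have created. The harness itself lives in the Ambari source tree, but the patching half can be reproduced with the standard library alone. A minimal, runnable sketch follows; pid_file_exists() is a hypothetical stand-in for the kind of filesystem check ranger_admin.py performs, not code from the repository:

import os
import unittest
from unittest.mock import patch


def pid_file_exists(pid_file="/var/run/ranger/rangeradmin.pid"):
    # Hypothetical stand-in for the check the patched test exercises.
    return os.path.isfile(pid_file)


class TestPatchPattern(unittest.TestCase):

    @patch("os.path.isfile")
    def test_isfile_is_mocked(self, isfile_mock):
        # The real filesystem is never touched; the mock answers instead.
        isfile_mock.return_value = True
        self.assertTrue(pid_file_exists())
        # Mirrors assertTrue(isfile_mock.called) in the test above: verify
        # the code under test actually consulted the patched call.
        self.assertTrue(isfile_mock.called)


if __name__ == "__main__":
    unittest.main()

Because patch() swaps os.path.isfile only for the duration of the decorated method, the final assertion proves the production code path reached the filesystem check, which is exactly what the isfile_mock.called assertions in test_start_default and test_start_secured establish.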

[07/14] ambari git commit: AMBARI-19083. After an Ambari-only upgrade, Enable Security (Configure Ambari Identity) failed because "ambari-sudo.sh" could not be run (aonishuk)

Posted by jo...@apache.org.
AMBARI-19083. After an Ambari-only upgrade, Enable Security (Configure Ambari Identity) failed because "ambari-sudo.sh" could not be run (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/89b9cf5b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/89b9cf5b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/89b9cf5b

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 89b9cf5beb1304b0d90dad00b371947a3c467e8a
Parents: 08342b3
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Mon Dec 5 15:23:58 2016 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Dec 5 15:23:58 2016 +0200

----------------------------------------------------------------------
 ambari-server/src/main/python/ambari_server_main.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/89b9cf5b/ambari-server/src/main/python/ambari_server_main.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server_main.py b/ambari-server/src/main/python/ambari_server_main.py
index b642cea..dbcafa4 100644
--- a/ambari-server/src/main/python/ambari_server_main.py
+++ b/ambari-server/src/main/python/ambari_server_main.py
@@ -28,6 +28,7 @@ from ambari_commons.logging_utils import get_debug_mode, print_warning_msg, prin
 from ambari_commons.os_check import OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons.os_utils import is_root, run_os_command
+from ambari_server.ambariPath import AmbariPath
 from ambari_server.dbConfiguration import ensure_dbms_is_running, ensure_jdbc_driver_is_installed
 from ambari_server.serverConfiguration import configDefaults, find_jdk, get_ambari_properties, \
   get_conf_dir, get_is_persisted, get_is_secure, get_java_exe_path, get_original_master_key, read_ambari_user, \
@@ -121,6 +122,8 @@ AMBARI_SERVER_NOT_STARTED_MSG = "Ambari Server java process hasn't been started
 ULIMIT_OPEN_FILES_KEY = 'ulimit.open.files'
 ULIMIT_OPEN_FILES_DEFAULT = 10000
 
+AMBARI_ENV_FILE = AmbariPath.get("/var/lib/ambari-server/ambari-env.sh")
+
 @OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
 def ensure_server_security_is_configured():
   pass
@@ -179,9 +182,10 @@ def generate_child_process_param_list(ambari_user, java_exe, class_path,
     # from subprocess, we have to skip --login option of su command. That's why
     # we change dir to / (otherwise subprocess can face with 'permission denied'
     # errors while trying to list current directory
-    cmd = "{ulimit_cmd} ; {su} {ambari_user} -s {sh_shell} -c '{command}'".format(ulimit_cmd=ulimit_cmd, 
+    cmd = "{ulimit_cmd} ; {su} {ambari_user} -s {sh_shell} -c 'source {ambari_env_file} ; {command}'".format(ulimit_cmd=ulimit_cmd,
                                                                                 su=locate_file('su', '/bin'), ambari_user=ambari_user,
-                                                                                sh_shell=locate_file('sh', '/bin'), command=command)
+                                                                                sh_shell=locate_file('sh', '/bin'), command=command,
+                                                                                ambari_env_file=AMBARI_ENV_FILE)
   else:
     cmd = "{ulimit_cmd} ; {command}".format(ulimit_cmd=ulimit_cmd, command=command)
     

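The fix wraps the server launch so that the su shell first sources ambari-env.sh, restoring the environment (including the PATH entries "ambari-sudo.sh" depends on) that "su -s sh -c" would otherwise drop. A small sketch of the command string the patched generate_child_process_param_list() builds; the paths, user, and java invocation below are sample values for illustration, not read from a live system:

# Illustrative reconstruction of the patched command construction.
ulimit_cmd = "ulimit -n 10000"
su = "/bin/su"
sh_shell = "/bin/sh"
ambari_user = "ambari"
ambari_env_file = "/var/lib/ambari-server/ambari-env.sh"
command = "java -server ..."  # truncated sample server command

cmd = "{ulimit_cmd} ; {su} {ambari_user} -s {sh_shell} -c 'source {ambari_env_file} ; {command}'".format(
    ulimit_cmd=ulimit_cmd, su=su, ambari_user=ambari_user,
    sh_shell=sh_shell, command=command, ambari_env_file=ambari_env_file)

print(cmd)
# ulimit -n 10000 ; /bin/su ambari -s /bin/sh -c 'source /var/lib/ambari-server/ambari-env.sh ; java -server ...'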

[08/14] ambari git commit: AMBARI-19059 Falcon quicklinks combobox is absent (dsen)

Posted by jo...@apache.org.
AMBARI-19059 Falcon quicklinks combobox is absent (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/431d2659
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/431d2659
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/431d2659

Branch: refs/heads/branch-feature-AMBARI-18456
Commit: 431d2659c67f7c07fe704b67eded4f83fe30d25a
Parents: 89b9cf5
Author: Dmytro Sen <ds...@apache.org>
Authored: Mon Dec 5 17:25:35 2016 +0200
Committer: Dmytro Sen <ds...@apache.org>
Committed: Mon Dec 5 17:25:35 2016 +0200

----------------------------------------------------------------------
 .../FALCON/0.5.0.2.1/metainfo.xml               |  7 ++++
 .../FALCON/0.5.0.2.1/quicklinks/quicklinks.json | 35 ++++++++++++++++++++
 2 files changed, 42 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/431d2659/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/metainfo.xml b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/metainfo.xml
index 7dab615..2cda70a 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/metainfo.xml
@@ -131,6 +131,13 @@
       <excluded-config-types>
         <config-type>oozie-site</config-type>
       </excluded-config-types>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/431d2659/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/quicklinks/quicklinks.json
new file mode 100644
index 0000000..cc81fca
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/quicklinks/quicklinks.json
@@ -0,0 +1,35 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"falcon.enableTLS",
+          "desired":"true",
+          "site":"falcon-startup-properties"
+        }
+      ]
+    },
+
+    "links": [
+      {
+        "name": "falcon_dashboard",
+        "label": "Falcon Dashboard",
+        "requires_user_name": "true",
+        "component_name": "FALCON_SERVER",
+        "url": "%@://%@:%@/",
+        "port":{
+          "http_property": "falcon_port",
+          "http_default_port": "15000",
+          "https_property": "falcon_port",
+          "https_default_port": "15443",
+          "regex": "^(\\d+)$",
+          "site": "falcon-env"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
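The quicklinks.json above is what drives the restored combobox: the protocol block switches to https only when falcon.enableTLS in falcon-startup-properties is "true", and the port block substitutes falcon_port from falcon-env into the "%@://%@:%@/" URL template, falling back to 15000 (http) or 15443 (https) when the property is missing or fails the regex. The actual resolution happens in Ambari's quick links processing; the following is only a rough, self-contained sketch of the selection logic the JSON implies:

import re

def resolve_falcon_url(host, configs):
    # Protocol check: https only if the desired value matches.
    tls = configs.get("falcon-startup-properties", {}).get("falcon.enableTLS") == "true"
    protocol = "https" if tls else "http"
    # Port: take falcon_port from falcon-env if it passes the regex,
    # otherwise fall back to the declared defaults.
    port = configs.get("falcon-env", {}).get("falcon_port")
    if not (port and re.match(r"^(\d+)$", str(port))):
        port = "15443" if tls else "15000"
    return "%s://%s:%s/" % (protocol, host, port)

# Example: TLS enabled, port supplied by falcon-env
print(resolve_falcon_url("c6401.ambari.apache.org",
                         {"falcon-startup-properties": {"falcon.enableTLS": "true"},
                          "falcon-env": {"falcon_port": "15443"}}))
# https://c6401.ambari.apache.org:15443/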