Posted to commits@ambari.apache.org by nc...@apache.org on 2017/01/18 15:57:34 UTC

[01/50] [abbrv] ambari git commit: AMBARI-19570. Hive View 2.0.0: Enable view of ranger authorization for a table. (dipayanb)

Repository: ambari
Updated Branches:
  refs/heads/branch-dev-patch-upgrade cd245c00a -> eb2c904e1


AMBARI-19570. Hive View 2.0.0: Enable view of ranger authorization for a table. (dipayanb)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/285666fa
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/285666fa
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/285666fa

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 285666fac5a14f461950386d2687d172a3574283
Parents: badf9f7
Author: Dipayan Bhowmick <di...@gmail.com>
Authored: Tue Jan 17 12:08:54 2017 +0530
Committer: Dipayan Bhowmick <di...@gmail.com>
Committed: Tue Jan 17 12:09:27 2017 +0530

----------------------------------------------------------------------
 .../view/hive20/actor/MetaDataManager.java      |   8 +-
 .../hive20/resources/system/SystemService.java  |  29 ++
 .../system/ranger/RangerException.java          |  56 ++++
 .../resources/system/ranger/RangerService.java  | 317 +++++++++++++++++++
 .../view/hive20/utils/AuthorizationChecker.java |  74 +++++
 .../resources/ui/app/adapters/application.js    |   1 +
 .../src/main/resources/ui/app/adapters/ping.js  |   5 +
 .../ui/app/configs/table-level-tabs.js          |   6 +
 .../hive20/src/main/resources/ui/app/router.js  |   1 +
 .../databases/database/tables/table/auth.js     |  27 ++
 .../src/main/resources/ui/app/styles/app.scss   |  12 +
 .../templates/components/table-properties.hbs   |   2 +-
 .../database/tables/table/auth-error.hbs        |  35 ++
 .../database/tables/table/auth-loading.hbs      |  23 ++
 .../databases/database/tables/table/auth.hbs    |  53 ++++
 .../views/hive20/src/main/resources/view.xml    |  33 ++
 16 files changed, 677 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/285666fa/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/actor/MetaDataManager.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/actor/MetaDataManager.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/actor/MetaDataManager.java
index 43733e4..525ec0d 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/actor/MetaDataManager.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/actor/MetaDataManager.java
@@ -69,15 +69,15 @@ public class MetaDataManager extends HiveActor {
     ActorRef databaseManager = databaseManagers.get(message.getUsername());
     if (databaseManager == null) {
       databaseManager = createDatabaseManager(message.getUsername(), message.getInstanceName());
-      databaseManagers.put(context.getUsername(), databaseManager);
-      databaseManager.tell(new DatabaseManager.Refresh(context.getUsername()), getSelf());
+      databaseManagers.put(message.getUsername(), databaseManager);
+      databaseManager.tell(new DatabaseManager.Refresh(message.getUsername()), getSelf());
     } else {
       if(message.isImmediate()) {
-        databaseManager.tell(new DatabaseManager.Refresh(context.getUsername(), false), getSelf());
+        databaseManager.tell(new DatabaseManager.Refresh(message.getUsername(), false), getSelf());
       }
       cancelTerminationScheduler(message.getUsername());
     }
-    scheduleTermination(context.getUsername());
+    scheduleTermination(message.getUsername());
   }
 
   private void handleTerminate(Terminate message) {
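The hunks above fix a per-user caching bug: the actor map was written and refreshed under context.getUsername() (the ambient request context) rather than the username carried by the Ping message, so one user's DatabaseManager could be stored under another user's key. A minimal self-contained sketch of the corrected pattern (class and names are illustrative, not taken from the Ambari codebase):

import java.util.HashMap;
import java.util.Map;

// Illustrative sketch: a per-user cache must be keyed by the username carried
// in the incoming message, not by whatever user the ambient context holds,
// otherwise entries for concurrent users overwrite one another.
public class PerUserManagerCache {
  private final Map<String, String> managers = new HashMap<>();

  public String managerFor(String messageUsername) {
    // Key strictly by the message's username, mirroring the fix above.
    return managers.computeIfAbsent(messageUsername, u -> "DatabaseManager[" + u + "]");
  }

  public static void main(String[] args) {
    PerUserManagerCache cache = new PerUserManagerCache();
    System.out.println(cache.managerFor("alice")); // DatabaseManager[alice]
    System.out.println(cache.managerFor("bob"));   // DatabaseManager[bob]
  }
}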

http://git-wip-us.apache.org/repos/asf/ambari/blob/285666fa/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/system/SystemService.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/system/SystemService.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/system/SystemService.java
index 0afe43c..1399ee4 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/system/SystemService.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/system/SystemService.java
@@ -22,16 +22,29 @@ import akka.actor.ActorRef;
 import org.apache.ambari.view.hive20.BaseService;
 import org.apache.ambari.view.hive20.ConnectionSystem;
 import org.apache.ambari.view.hive20.actor.message.Ping;
+import org.apache.ambari.view.hive20.resources.system.ranger.RangerService;
+import org.json.simple.JSONObject;
 
+import javax.inject.Inject;
+import javax.ws.rs.GET;
 import javax.ws.rs.POST;
 import javax.ws.rs.Path;
+import javax.ws.rs.QueryParam;
 import javax.ws.rs.core.Response;
+import java.util.List;
 
 /**
  * System services required for the application to function
  */
 public class SystemService extends BaseService {
 
+  private final RangerService rangerService;
+
+  @Inject
+  public SystemService(RangerService rangerService) {
+    this.rangerService = rangerService;
+  }
+
   /**
    * Clients should send pings to the server at regular intervals so that the system can keep things alive or do
    * cleanup work when the pings stop
@@ -45,4 +58,20 @@ public class SystemService extends BaseService {
     metaDataManager.tell(new Ping(context.getUsername(), context.getInstanceName()), ActorRef.noSender());
     return Response.ok().status(Response.Status.NO_CONTENT).build();
   }
+
+
+  /**
+   * Returns the Ranger policies for the given database and table; the current user must be a cluster operator or an Ambari administrator
+   */
+  @GET
+  @Path("/ranger/auth")
+  public Response rangerAuth(@QueryParam("database") String database,
+                             @QueryParam("table") String table) {
+
+    List<RangerService.Policy> policies = rangerService.getPolicies(database, table);
+    JSONObject response = new JSONObject();
+    response.put("policies", policies);
+    return Response.ok(response).build();
+  }
+
 }
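For reference, the new endpoint takes database and table as query parameters and wraps the policy list in a "policies" key. A minimal smoke-test sketch of a raw HTTP call follows; the Ambari host, view instance URL, database/table names, and admin credentials are assumptions for illustration, not part of the commit:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Base64;

// Hypothetical smoke test for GET .../system/ranger/auth?database=...&table=...
public class RangerAuthSmokeTest {
  public static void main(String[] args) throws Exception {
    // The view resource base URL below is an assumption for illustration.
    String base = "http://ambari-host:8080/api/v1/views/HIVE/versions/2.0.0"
        + "/instances/AUTO_HIVE20_INSTANCE/resources/system";
    URL url = new URL(base + "/ranger/auth?database=default&table=customers");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    String auth = Base64.getEncoder().encodeToString("admin:admin".getBytes());
    conn.setRequestProperty("Authorization", "Basic " + auth);
    // Throws on a non-200 response; good enough for a smoke test.
    try (BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()))) {
      String line;
      while ((line = in.readLine()) != null) {
        // e.g. {"policies":[{"name":"...","users":[...],"groups":[...]}]}
        System.out.println(line);
      }
    }
  }
}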

http://git-wip-us.apache.org/repos/asf/ambari/blob/285666fa/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/system/ranger/RangerException.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/system/ranger/RangerException.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/system/ranger/RangerException.java
new file mode 100644
index 0000000..f32a997
--- /dev/null
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/system/ranger/RangerException.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.view.hive20.resources.system.ranger;
+
+import org.apache.commons.lang.exception.ExceptionUtils;
+import org.json.simple.JSONObject;
+
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Exceptions sent by the Authorization API
+ */
+public class RangerException extends WebApplicationException {
+
+  public RangerException(String message, String errorCode, int status, Exception ex) {
+    super(errorEntity(message, errorCode, status, ex));
+  }
+
+  public RangerException(String message, String errorCode, int status) {
+    this(message, errorCode, status, null);
+  }
+
+  protected static Response errorEntity(String message, String errorCode, int status, Exception ex) {
+    Map<String, Object> response = new HashMap<String, Object>();
+    response.put("message", message);
+    response.put("errorCode", errorCode);
+    if (ex != null) {
+      response.put("trace", ExceptionUtils.getStackTrace(ex));
+    }
+
+    JSONObject finalResponse = new JSONObject();
+    finalResponse.put("errors", response);
+    return Response.status(status).entity(new JSONObject(finalResponse)).type(MediaType.APPLICATION_JSON).build();
+  }
+
+}
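The entity built by errorEntity above is what the auth-error.hbs template later in this commit binds to (model.errors.message, model.errors.errorCode, model.errors.trace). A small sketch reproducing the envelope with json-simple; the values are illustrative:

import org.json.simple.JSONObject;

import java.util.HashMap;
import java.util.Map;

// Sketch: reproduce the {"errors": {...}} envelope that RangerException returns,
// so the structure the UI consumes is visible at a glance. Values are made up.
public class RangerErrorPayload {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    Map<String, Object> errors = new HashMap<>();
    errors.put("message", "User hive does not have privilege to access the table authorization information");
    errors.put("errorCode", "NOT_OPERATOR_OR_ADMIN");
    // A "trace" entry is added only when an underlying exception is supplied.
    JSONObject payload = new JSONObject();
    payload.put("errors", errors);
    System.out.println(payload.toJSONString());
    // -> {"errors":{"errorCode":"NOT_OPERATOR_OR_ADMIN","message":"User hive ..."}}
  }
}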

http://git-wip-us.apache.org/repos/asf/ambari/blob/285666fa/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/system/ranger/RangerService.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/system/ranger/RangerService.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/system/ranger/RangerService.java
new file mode 100644
index 0000000..95ab27c
--- /dev/null
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/system/ranger/RangerService.java
@@ -0,0 +1,317 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.view.hive20.resources.system.ranger;
+
+import com.google.common.collect.Lists;
+import org.apache.ambari.view.AmbariHttpException;
+import org.apache.ambari.view.ViewContext;
+import org.apache.ambari.view.hive20.utils.AuthorizationChecker;
+import org.apache.ambari.view.utils.ambari.AmbariApi;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.hbase.util.Strings;
+import org.json.simple.JSONArray;
+import org.json.simple.JSONObject;
+import org.json.simple.JSONValue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.inject.Inject;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Fetches table authorization policies from Ranger, for both Ambari-managed and standalone view instances.
+ */
+public class RangerService {
+
+  private static final String RANGER_CONFIG_URL = "/api/v1/clusters/%s/configurations/service_config_versions?service_name=RANGER&is_current=true";
+
+  protected final Logger LOG = LoggerFactory.getLogger(getClass());
+
+  private final AuthorizationChecker authChecker;
+  private final ViewContext context;
+
+  @Inject
+  public RangerService(AuthorizationChecker authChecker, ViewContext context) {
+    this.authChecker = authChecker;
+    this.context = context;
+  }
+
+  public List<Policy> getPolicies(String database, String table) {
+
+
+    if (context.getCluster() == null) {
+      return getPoliciesFromNonAmbariCluster(database, table);
+    } else {
+      if (!authChecker.isOperator()) {
+        LOG.error("User is not authorized to access the table authorization information");
+        throw new RangerException("User " + context.getUsername() + " does not have privilege to access the table authorization information", "NOT_OPERATOR_OR_ADMIN", 400);
+      }
+      return getPoliciesFromAmbariCluster(database, table);
+    }
+
+  }
+
+  private List<Policy> getPoliciesFromAmbariCluster(String database, String table) {
+    String rangerUrl = null;
+    try {
+      rangerUrl = getRangerUrlFromAmbari();
+    } catch (AmbariHttpException e) {
+      LOG.error("Failed to fetch Ranger URL from ambari. Exception: {}", e);
+      throw new RangerException("Failed to fetch Ranger URL from Ambari", "AMBARI_FETCH_FAILED", 500, e);
+    }
+    if (Strings.isEmpty(rangerUrl)) {
+      LOG.info("Ranger url is not configured for the instance");
+      throw new RangerException("Ranger url is not configured in Ambari.", "CONFIGURATION_ERROR", 500);
+    }
+
+    return getPoliciesFromRanger(rangerUrl, database, table);
+  }
+
+  private List<Policy> getPoliciesFromNonAmbariCluster(String database, String table) {
+    String rangerUrl = getRangerUrlFromConfig();
+    if (Strings.isEmpty(rangerUrl)) {
+      LOG.info("Ranger url is not configured for the instance");
+      throw new RangerException("Ranger url is not configured in Ambari Instance.", "CONFIGURATION_ERROR", 500);
+    }
+
+    return getPoliciesFromRanger(rangerUrl, database, table);
+  }
+
+  private List<Policy> getPoliciesFromRanger(String rangerUrl, String database, String table) {
+    RangerCred cred = getRangerCredFromConfig();
+    if (!cred.isValid()) {
+      LOG.info("Ranger username and password are not configured");
+      throw new RangerException("Bad ranger username/password", "CONFIGURATION_ERROR", 500);
+    }
+
+    String rangerResponse = fetchResponseFromRanger(rangerUrl, cred.username, cred.password, database, table);
+    if (Strings.isEmpty(rangerResponse)) {
+      return Lists.newArrayList();
+    }
+
+    return parseResponse(rangerResponse);
+  }
+
+  private List<Policy> parseResponse(String rangerResponse) {
+    JSONArray jsonArray = (JSONArray) JSONValue.parse(rangerResponse);
+    if (jsonArray.size() == 0) {
+      return new ArrayList<>();
+    }
+
+    List<Policy> policies = new ArrayList<>();
+
+    for (Object policy : jsonArray) {
+      JSONObject policyJson = (JSONObject) policy;
+      if ((Boolean) policyJson.get("isEnabled")) {
+        policies.add(parsePolicy(policyJson));
+      }
+    }
+
+    return policies;
+  }
+
+  private Policy parsePolicy(JSONObject policyJson) {
+    String name = (String) policyJson.get("name");
+    JSONArray policyItems = (JSONArray) policyJson.get("policyItems");
+    Policy policy = new Policy(name);
+
+    if (policyItems.size() > 0) {
+      JSONObject policyItem = (JSONObject) policyItems.get(0);
+      JSONArray usersJson = (JSONArray) policyItem.get("users");
+      JSONArray groupsJson = (JSONArray) policyItem.get("groups");
+
+
+      for (Object user : usersJson) {
+        policy.addUser((String) user);
+      }
+
+      for (Object group : groupsJson) {
+        policy.addGroup((String) group);
+      }
+    }
+
+
+    return policy;
+  }
+
+  private String fetchResponseFromRanger(String rangerUrl, String username, String password, String database, String table) {
+
+    String serviceName = context.getProperties().get("hive.ranger.servicename");
+    if(Strings.isEmpty(serviceName)) {
+      LOG.error("Bad service name configured");
+      throw new RangerException("Ranger service name is not configured in Ambari Instance.", "CONFIGURATION_ERROR", 500);
+    }
+
+    Map<String, String> headers = getRangerHeaders(username, password);
+    StringBuilder urlBuilder = getRangerUrl(rangerUrl, database, table, serviceName);
+
+    try {
+      InputStream stream = context.getURLStreamProvider().readFrom(urlBuilder.toString(), "GET", (String) null, headers);
+      if (stream == null) {
+        LOG.error("Ranger returned an empty stream.");
+        throw new RangerException("Ranger returned an empty stream.", "RANGER_ERROR", 500);
+      }
+      return IOUtils.toString(stream);
+    } catch (IOException e) {
+      LOG.error("Bad response from Ranger. Exception: {}", e);
+      throw new RangerException("Bad response from Ranger", "RANGER_ERROR", 500, e);
+    }
+  }
+
+  private StringBuilder getRangerUrl(String rangerUrl, String database, String table, String serviceName) {
+    StringBuilder queryParams = new StringBuilder();
+    if (!Strings.isEmpty(database)) {
+      queryParams.append("resource:database=");
+      queryParams.append(database);
+      if (!Strings.isEmpty(table)) {
+        queryParams.append("&");
+      }
+    }
+
+    if (!Strings.isEmpty(table)) {
+      queryParams.append("resource:table=");
+      queryParams.append(table);
+    }
+
+
+    String queryParamString = queryParams.toString();
+
+    StringBuilder urlBuilder = new StringBuilder();
+    urlBuilder.append(rangerUrl);
+    urlBuilder.append("/service/public/v2/api/service/");
+    urlBuilder.append(serviceName);
+    urlBuilder.append("/policy");
+    if (!Strings.isEmpty(queryParamString)) {
+      urlBuilder.append("?");
+      urlBuilder.append(queryParamString);
+    }
+    return urlBuilder;
+  }
+
+  private Map<String, String> getRangerHeaders(String username, String password) {
+    String authString = username + ":" + password;
+    byte[] authBytes = Base64.encodeBase64(authString.getBytes());
+    String auth = new String(authBytes);
+    Map<String, String> headers = new HashMap<>();
+    headers.put("Authorization", "Basic " + auth);
+    return headers;
+  }
+
+  private RangerCred getRangerCredFromConfig() {
+    return new RangerCred(context.getProperties().get("hive.ranger.username"),
+        context.getProperties().get("hive.ranger.password"));
+  }
+
+  public String getRangerUrlFromAmbari() throws AmbariHttpException {
+
+    AmbariApi ambariApi = new AmbariApi(context);
+    String url = String.format(RANGER_CONFIG_URL, context.getCluster().getName());
+    String config = ambariApi.readFromAmbari(url, "GET", null, null);
+    JSONObject configJson = (JSONObject) JSONValue.parse(config);
+    JSONArray itemsArray = (JSONArray) configJson.get("items");
+    if (itemsArray.size() == 0) {
+      LOG.error("Ranger service is not enabled in Ambari");
+      throw new RangerException("Ranger service is not enabled in Ambari", "SERVICE_ERROR", 500);
+    }
+    JSONObject item = (JSONObject) itemsArray.get(0);
+    JSONArray configurations = (JSONArray) item.get("configurations");
+    for (Object configuration : configurations) {
+      JSONObject configurationJson = (JSONObject) configuration;
+      String type = (String) configurationJson.get("type");
+      if (type.equalsIgnoreCase("admin-properties")) {
+        JSONObject properties = (JSONObject) configurationJson.get("properties");
+        return (String) properties.get("policymgr_external_url");
+      }
+    }
+    return null;
+  }
+
+  public String getRangerUrlFromConfig() {
+    return context.getProperties().get("hive.ranger.url");
+  }
+
+  /**
+   * POJO class to store the policy information from Ranger
+   */
+  public static class Policy {
+    private String name;
+    private List<String> users = new ArrayList<>();
+    private List<String> groups = new ArrayList<>();
+
+    public Policy(String name) {
+      this.name = name;
+    }
+
+    public String getName() {
+      return name;
+    }
+
+    public void setName(String name) {
+      this.name = name;
+    }
+
+    public List<String> getUsers() {
+      return users;
+    }
+
+    public void setUsers(List<String> users) {
+      this.users = users;
+    }
+
+    public List<String> getGroups() {
+      return groups;
+    }
+
+    public void setGroups(List<String> groups) {
+      this.groups = groups;
+    }
+
+
+    public void addUser(String user) {
+      users.add(user);
+    }
+
+    public void addGroup(String group) {
+      groups.add(group);
+    }
+  }
+
+  /**
+   * POJO class to store the username and password for ranger access
+   */
+  private class RangerCred {
+    public String username;
+    public String password;
+
+    public RangerCred(String username, String password) {
+      this.username = username;
+      this.password = password;
+    }
+
+    public boolean isValid() {
+      return !(Strings.isEmpty(username) || Strings.isEmpty(password));
+    }
+  }
+}
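Putting getRangerUrl and getRangerHeaders together, the outbound request reduces to the sketch below. The Ranger host/port and the repository name c1_hive are assumptions matching the view.xml placeholders later in this commit:

import java.util.Base64;

// Sketch of the policy lookup RangerService issues against the Ranger public v2 API.
public class RangerPolicyUrl {
  public static void main(String[] args) {
    String rangerUrl = "http://ranger-host:6080";   // hive.ranger.url (assumed)
    String serviceName = "c1_hive";                 // hive.ranger.servicename (assumed)
    String query = "resource:database=default&resource:table=customers";
    String url = rangerUrl + "/service/public/v2/api/service/" + serviceName + "/policy?" + query;

    // Mirrors getRangerHeaders(): HTTP Basic auth from hive.ranger.username/password.
    String auth = Base64.getEncoder().encodeToString("admin:admin".getBytes());
    System.out.println("GET " + url);
    System.out.println("Authorization: Basic " + auth);
    // Ranger responds with a JSON array; parseResponse() keeps only policies
    // whose "isEnabled" flag is true.
  }
}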

http://git-wip-us.apache.org/repos/asf/ambari/blob/285666fa/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/utils/AuthorizationChecker.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/utils/AuthorizationChecker.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/utils/AuthorizationChecker.java
new file mode 100644
index 0000000..3121ba0
--- /dev/null
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/utils/AuthorizationChecker.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.view.hive20.utils;
+
+import org.apache.ambari.view.AmbariHttpException;
+import org.apache.ambari.view.ViewContext;
+import org.apache.ambari.view.utils.ambari.AmbariApi;
+import org.apache.ambari.view.utils.ambari.NoClusterAssociatedException;
+import org.json.simple.JSONArray;
+import org.json.simple.JSONObject;
+import org.json.simple.JSONValue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.inject.Inject;
+
+/**
+ * Utility class to check the authorization of the user
+ */
+public class AuthorizationChecker {
+  protected final Logger LOG = LoggerFactory.getLogger(getClass());
+  private static final String AMBARI_OR_CLUSTER_ADMIN_PRIVILEGE_URL = "/api/v1/users/%s?privileges/PrivilegeInfo/permission_name=AMBARI.ADMINISTRATOR|" +
+      "(privileges/PrivilegeInfo/permission_name.in(CLUSTER.ADMINISTRATOR,CLUSTER.OPERATOR)&privileges/PrivilegeInfo/cluster_name=%s)";
+
+  private final ViewContext viewContext;
+  private final AmbariApi ambariApi;
+
+
+  @Inject
+  public AuthorizationChecker(ViewContext viewContext) {
+    this.viewContext = viewContext;
+    this.ambariApi = new AmbariApi(viewContext);
+  }
+
+  public boolean isOperator() {
+    if (viewContext.getCluster() == null) {
+      throw new NoClusterAssociatedException("No cluster is associated with the current instance");
+    }
+    String fetchUrl = String.format(AMBARI_OR_CLUSTER_ADMIN_PRIVILEGE_URL, viewContext.getUsername(), viewContext.getCluster().getName());
+
+    try {
+      String response = ambariApi.readFromAmbari(fetchUrl, "GET", null, null);
+
+      if (response != null && !response.isEmpty()) {
+        JSONObject json = (JSONObject) JSONValue.parse(response);
+        if (json.containsKey("privileges")) {
+          JSONArray privileges = (JSONArray) json.get("privileges");
+          if (privileges.size() > 0) return true;
+        }
+      }
+
+    } catch (AmbariHttpException e) {
+      LOG.error("Got Error response from url : {}. Response : {}", fetchUrl, e.getMessage(), e);
+    }
+
+    return false;
+  }
+}
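For clarity, isOperator() reduces to a single Ambari API query; the sketch below prints the URL generated for a hypothetical user "hive" on cluster "c1":

// Sketch: the privilege query AuthorizationChecker builds. A non-empty
// "privileges" array in the response marks the user as an Ambari administrator,
// cluster administrator, or cluster operator.
public class PrivilegeUrl {
  public static void main(String[] args) {
    String template = "/api/v1/users/%s?privileges/PrivilegeInfo/permission_name=AMBARI.ADMINISTRATOR|"
        + "(privileges/PrivilegeInfo/permission_name.in(CLUSTER.ADMINISTRATOR,CLUSTER.OPERATOR)"
        + "&privileges/PrivilegeInfo/cluster_name=%s)";
    System.out.println(String.format(template, "hive", "c1"));
  }
}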

http://git-wip-us.apache.org/repos/asf/ambari/blob/285666fa/contrib/views/hive20/src/main/resources/ui/app/adapters/application.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/adapters/application.js b/contrib/views/hive20/src/main/resources/ui/app/adapters/application.js
index 82d53e4..c0189cc 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/adapters/application.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/adapters/application.js
@@ -56,6 +56,7 @@ export default DS.RESTAdapter.extend({
       // by setting the proxyurl parameter in ember serve and for ambari to authenticate the requests, it needs this
       // basic authorization. This is for default admin/admin username/password combination.
       headers['Authorization'] = 'Basic YWRtaW46YWRtaW4=';
+      //headers['Authorization'] = 'Basic aGl2ZTpoaXZl';
     }
      return headers;
   }),

http://git-wip-us.apache.org/repos/asf/ambari/blob/285666fa/contrib/views/hive20/src/main/resources/ui/app/adapters/ping.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/adapters/ping.js b/contrib/views/hive20/src/main/resources/ui/app/adapters/ping.js
index 20c6d9c..f88cfed 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/adapters/ping.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/adapters/ping.js
@@ -26,5 +26,10 @@ export default ApplicationAdapter.extend({
 
   pathForType() {
     return "system/ping";
+  },
+
+  fetchAuth(databaseName, tableName) {
+    const url = this.buildURL() + '/system/ranger/auth';
+    return this.ajax(url, "GET", {data: {database: databaseName, table: tableName}});
   }
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/285666fa/contrib/views/hive20/src/main/resources/ui/app/configs/table-level-tabs.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/configs/table-level-tabs.js b/contrib/views/hive20/src/main/resources/ui/app/configs/table-level-tabs.js
index 7a0cec1..ab7125a 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/configs/table-level-tabs.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/configs/table-level-tabs.js
@@ -60,6 +60,12 @@ let tableLevelTabs = [
     label: 'STATISTICS',
     link: 'databases.database.tables.table.stats',
     faIcon: 'line-chart'
+  }),
+  Ember.Object.create({
+    name: 'authorization',
+    label: 'AUTHORIZATION',
+    link: 'databases.database.tables.table.auth',
+    faIcon: 'users'
   })
 ];
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/285666fa/contrib/views/hive20/src/main/resources/ui/app/router.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/router.js b/contrib/views/hive20/src/main/resources/ui/app/router.js
index 692cefd..e32dfe8 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/router.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/router.js
@@ -42,6 +42,7 @@ Router.map(function() {
           this.route('view');
           this.route('ddl');
           this.route('stats');
+          this.route('auth');
         })
       });
     });

http://git-wip-us.apache.org/repos/asf/ambari/blob/285666fa/contrib/views/hive20/src/main/resources/ui/app/routes/databases/database/tables/table/auth.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/routes/databases/database/tables/table/auth.js b/contrib/views/hive20/src/main/resources/ui/app/routes/databases/database/tables/table/auth.js
new file mode 100644
index 0000000..ec9d1a2
--- /dev/null
+++ b/contrib/views/hive20/src/main/resources/ui/app/routes/databases/database/tables/table/auth.js
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import TableMetaRouter from './table-meta-router';
+
+export default TableMetaRouter.extend({
+  model(params, transition) {
+    let databaseName = transition.params['databases.database']['databaseId'];
+    let tableName = transition.params['databases.database.tables.table']['name'];
+    return this.store.adapterFor('ping').fetchAuth(databaseName, tableName);
+  }
+});

http://git-wip-us.apache.org/repos/asf/ambari/blob/285666fa/contrib/views/hive20/src/main/resources/ui/app/styles/app.scss
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/styles/app.scss b/contrib/views/hive20/src/main/resources/ui/app/styles/app.scss
index 8e0ab21..5ae65d1 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/styles/app.scss
+++ b/contrib/views/hive20/src/main/resources/ui/app/styles/app.scss
@@ -805,6 +805,18 @@ pre {
 }
 
 
+.loader {
+  padding-top: 25px;
+  padding-bottom: 25px;
+}
+
+.authorizations {
+  &.alert {
+    margin: 0;
+  }
+}
+
+
 
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/285666fa/contrib/views/hive20/src/main/resources/ui/app/templates/components/table-properties.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/components/table-properties.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/components/table-properties.hbs
index 0eaab5e..953ef84 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/components/table-properties.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/components/table-properties.hbs
@@ -33,7 +33,7 @@
   {{/each}}
   <tr class="new-settings text-center">
     <td colspan="3">
-      <a {{action "addNewRow"}}>{{fa-icon "plus"}} Add New Column</a>
+      <a {{action "addNewRow"}}>{{fa-icon "plus"}} Add New Properties</a>
     </td>
   </tr>
   </tbody>

http://git-wip-us.apache.org/repos/asf/ambari/blob/285666fa/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/auth-error.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/auth-error.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/auth-error.hbs
new file mode 100644
index 0000000..db3d3f2
--- /dev/null
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/auth-error.hbs
@@ -0,0 +1,35 @@
+{{!
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+}}
+
+<div class="row">
+  <div class="authorizations alert alert-info">
+    <p class="lead">{{fa-icon "shield" size=1}} Ranger policies</p>
+  </div>
+</div>
+<div class="row">
+  <div class="col-md-12 alert alert-danger">
+    <p><strong>Message:</strong> {{model.errors.message}}</p>
+    <p><strong>Error Code:</strong> {{model.errors.errorCode}}</p>
+    {{#if model.errors.trace}}
+      <p><strong>Trace:</strong></p>
+      <pre class="prettyprint">
+        {{model.errors.trace}}
+      </pre>
+    {{/if}}
+  </div>
+</div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/285666fa/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/auth-loading.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/auth-loading.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/auth-loading.hbs
new file mode 100644
index 0000000..fbeb1a8
--- /dev/null
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/auth-loading.hbs
@@ -0,0 +1,23 @@
+{{!
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+}}
+
+<div class="col-md-12 text-center loader">
+  {{fa-icon "spinner" spin=true size=3}}
+  <br>
+  <h3>Loading authorization information from Ranger</h3>
+</div>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/285666fa/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/auth.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/auth.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/auth.hbs
new file mode 100644
index 0000000..f717bcc
--- /dev/null
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/auth.hbs
@@ -0,0 +1,53 @@
+{{!
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+}}
+<div class="row">
+  <div class="authorizations alert alert-info">
+    <p class="lead">{{fa-icon "shield" size=1}} Ranger policies</p>
+  </div>
+</div>
+
+<div class="row">
+  <table class="table table-bordered table-hover">
+    <thead>
+    <tr>
+      <th width="20%">POLICY NAME</th>
+      <th width="40%">USERS</th>
+      <th width="40%">GROUPS</th>
+    </tr>
+    </thead>
+    <tbody>
+    {{#each model.policies as |policy|}}
+      <tr>
+        <td>{{policy.name}}</td>
+        <td>
+          {{#each policy.users as |user|}}
+            <span class="label label-success">{{user}}</span>
+          {{/each}}
+        </td>
+        <td>
+          {{#each policy.groups as |group|}}
+            <span class="label label-success">{{group}}</span>
+          {{/each}}
+        </td>
+      </tr>
+    {{/each}}
+    </tbody>
+  </table>
+</div>
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/285666fa/contrib/views/hive20/src/main/resources/view.xml
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/view.xml b/contrib/views/hive20/src/main/resources/view.xml
index 2cbfef0..7cbe15c 100644
--- a/contrib/views/hive20/src/main/resources/view.xml
+++ b/contrib/views/hive20/src/main/resources/view.xml
@@ -57,6 +57,30 @@
     </parameter>
 
     <parameter>
+        <name>hive.ranger.servicename</name>
+        <description>Set the service name of ranger configured for this hive cluster</description>
+        <label>Ranger Service Name</label>
+        <placeholder>c1_hive</placeholder>
+        <required>false</required>
+    </parameter>
+
+    <parameter>
+        <name>hive.ranger.username</name>
+        <description>Admin username for ranger</description>
+        <label>Ranger Username</label>
+        <default-value>admin</default-value>
+        <required>false</required>
+    </parameter>
+    <parameter>
+        <name>hive.ranger.password</name>
+        <description>Admin password for ranger</description>
+        <label>Ranger Password</label>
+        <default-value>admin</default-value>
+        <masked>true</masked>
+        <required>false</required>
+    </parameter>
+
+    <parameter>
         <name>hive.metastore.warehouse.dir</name>
         <description>Hive Metastore directory (example: /apps/hive/warehouse)</description>
         <label>Hive Metastore directory</label>
@@ -149,6 +173,15 @@
     </parameter>
 
     <parameter>
+        <name>hive.ranger.url</name>
+        <description>Ranger URL</description>
+        <label>Ranger URL</label>
+        <placeholder>http://rangerhost:port</placeholder>
+        <cluster-config>fake</cluster-config>
+        <required>false</required>
+    </parameter>
+
+    <parameter>
         <name>webhdfs.username</name>
         <description>doAs for proxy user for HDFS. By default, uses the currently logged-in Ambari user.</description>
         <label>WebHDFS Username</label>


[39/50] [abbrv] ambari git commit: AMBARI-19545: Ambari-agent - In HIVE and OOZIE stack scripts, copy JCEKS file to desired location

Posted by nc...@apache.org.
AMBARI-19545: Ambari-agent - In HIVE and OOZIE stack scripts, copy JCEKS file to desired location

This reverts commit e700484e80446174d72b3ce40295cbea4689a50a.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6ccff934
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6ccff934
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6ccff934

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 6ccff934da9f5c387dd55e37d3d478037454b12c
Parents: 39174ea
Author: Nahappan Somasundaram <ns...@hortonworks.com>
Authored: Tue Jan 17 11:48:57 2017 -0800
Committer: Nahappan Somasundaram <ns...@hortonworks.com>
Committed: Tue Jan 17 18:59:22 2017 -0800

----------------------------------------------------------------------
 .../libraries/functions/security_commons.py     | 37 ++++++++++++++++++++
 .../HIVE/0.12.0.2.0/package/scripts/hive.py     |  7 ++++
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    | 18 ++++++++--
 3 files changed, 60 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6ccff934/ambari-common/src/main/python/resource_management/libraries/functions/security_commons.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/security_commons.py b/ambari-common/src/main/python/resource_management/libraries/functions/security_commons.py
index 8282dc5..cca244d 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/security_commons.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/security_commons.py
@@ -22,11 +22,48 @@ from resource_management import Execute, File
 from tempfile import mkstemp
 import os
 import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
+from resource_management.core.source import StaticFile
 
 FILE_TYPE_XML = 'XML'
 FILE_TYPE_PROPERTIES = 'PROPERTIES'
 FILE_TYPE_JAAS_CONF = 'JAAS_CONF'
 
+# The property name used by the hadoop credential provider
+HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME = 'hadoop.security.credential.provider.path'
+
+# Copy JCEKS provider to service specific location and update the ACL
+def update_credential_provider_path(config, config_type, dest_provider_path, file_owner, file_group):
+  """
+  Copies the JCEKS file for the specified config from the default location to the given location,
+  and sets the ACLs for the specified owner and group. Also updates the config type's configuration
+  hadoop credential store provider with the copied file name.
+  :param config: configurations['configurations'][config_type]
+  :param config_type: Like hive-site, oozie-site, etc.
+  :param dest_provider_path: The full path to the file where the JCEKS provider file is to be copied to.
+  :param file_owner: File owner
+  :param file_group: Group
+  :return: A copy of the config that was modified or the input config itself if nothing was modified.
+  """
+  # Get the path to the provider <config_type>.jceks
+  if HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME in config:
+    provider_paths = config[HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME].split(',')
+    for path_index in range(len(provider_paths)):
+      provider_path = provider_paths[path_index]
+      if config_type == os.path.splitext(os.path.basename(provider_path))[0]:
+        src_provider_path = provider_path[len('jceks://file'):]
+        File(dest_provider_path,
+             owner = file_owner,
+             group = file_group,
+             mode = 0640,
+             content = StaticFile(src_provider_path)
+             )
+        provider_paths[path_index] = 'jceks://file{0}'.format(dest_provider_path)
+        # make a copy of the config dictionary since it is read-only
+        config_copy = config.copy()
+        config_copy[HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME] = ','.join(provider_paths)
+        return config_copy
+  return config
+
 def validate_security_config_properties(params, configuration_rules):
   """
   Generic security configuration validation based on a set of rules and operations
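The docstring of update_credential_provider_path above describes a path rewrite on the hadoop.security.credential.provider.path property. A compact sketch of the same string manipulation (written in Java for consistency with the earlier examples; the source and destination paths are illustrative):

// Sketch of the rewrite update_credential_provider_path performs: strip the
// jceks://file scheme to get the source file, copy it into the service conf
// dir (copy itself not shown), then point the property at the copied file.
public class JceksPathRewrite {
  public static void main(String[] args) {
    String provider = "jceks://file/var/lib/ambari-agent/cred/conf/hive-site.jceks"; // assumed
    String srcPath  = provider.substring("jceks://file".length()); // file to copy, mode 0640
    String destPath = "/etc/hive/conf/hive-site.jceks";            // os.path.join(conf_dir, 'hive-site.jceks')
    String rewritten = "jceks://file" + destPath;                  // new property value
    System.out.println(srcPath + " -> " + destPath);
    System.out.println("hadoop.security.credential.provider.path = " + rewritten);
  }
}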

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ccff934/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
index f825982..16273c7 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
@@ -41,6 +41,7 @@ from resource_management.core.shell import quote_bash_args
 from resource_management.core.logger import Logger
 from resource_management.core import utils
 from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook
+from resource_management.libraries.functions.security_commons import update_credential_provider_path
 from ambari_commons.constants import SERVICE
 
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
@@ -64,6 +65,12 @@ def hive(name=None):
   for conf_dir in params.hive_conf_dirs_list:
     fill_conf_dir(conf_dir)
 
+  params.hive_site_config = update_credential_provider_path(params.hive_site_config,
+                                                     'hive-site',
+                                                     os.path.join(params.hive_conf_dir, 'hive-site.jceks'),
+                                                     params.hive_user,
+                                                     params.user_group
+                                                     )
   XmlConfig("hive-site.xml",
             conf_dir=params.hive_config_dir,
             configurations=params.hive_site_config,

http://git-wip-us.apache.org/repos/asf/ambari/blob/6ccff934/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
index 4a472ff..def0545 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
@@ -36,6 +36,7 @@ from resource_management.libraries.functions.oozie_prepare_war import prepare_wa
 from resource_management.libraries.functions.copy_tarball import get_current_version
 from resource_management.libraries.resources.xml_config import XmlConfig
 from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.security_commons import update_credential_provider_path
 from resource_management.core.resources.packaging import Package
 from resource_management.core.shell import as_user, as_sudo, call
 from resource_management.core.exceptions import Fail
@@ -50,7 +51,6 @@ from ambari_commons.inet_utils import download_file
 
 from resource_management.core import Logger
 
-
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def oozie(is_server=False):
   import params
@@ -115,6 +115,14 @@ def oozie(is_server=False):
              owner = params.oozie_user,
              group = params.user_group
   )
+
+  params.oozie_site = update_credential_provider_path(params.oozie_site,
+                                                      'oozie-site',
+                                                      os.path.join(params.conf_dir, 'oozie-site.jceks'),
+                                                      params.oozie_user,
+                                                      params.user_group
+                                                      )
+
   XmlConfig("oozie-site.xml",
     conf_dir = params.conf_dir,
     configurations = params.oozie_site,
@@ -289,9 +297,15 @@ def oozie_server_specific():
         group = params.user_group
     )
     if 'hive-site' in params.config['configurations']:
+      hive_site_config = update_credential_provider_path(params.config['configurations']['hive-site'],
+                                                         'hive-site',
+                                                         os.path.join(params.hive_conf_dir, 'hive-site.jceks'),
+                                                         params.oozie_user,
+                                                         params.user_group
+                                                         )
       XmlConfig("hive-site.xml",
         conf_dir=params.hive_conf_dir,
-        configurations=params.config['configurations']['hive-site'],
+        configurations=hive_site_config,
         configuration_attributes=params.config['configuration_attributes']['hive-site'],
         owner=params.oozie_user,
         group=params.user_group,


[17/50] [abbrv] ambari git commit: AMBARI-19590 Add/delete host component: visual changes for configuration popup. (ababiichuk)

Posted by nc...@apache.org.
AMBARI-19590 Add/delete host component: visual changes for configuration popup. (ababiichuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/83716672
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/83716672
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/83716672

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 83716672e99cbf012f9db1561c7d1d044459ae4f
Parents: 71c5b1f
Author: ababiichuk <ab...@hortonworks.com>
Authored: Tue Jan 17 18:35:37 2017 +0200
Committer: ababiichuk <ab...@hortonworks.com>
Committed: Tue Jan 17 18:35:37 2017 +0200

----------------------------------------------------------------------
 ambari-web/app/messages.js                         |  1 +
 .../common/modal_popups/dependent_configs_list.hbs | 17 +++++++++--------
 2 files changed, 10 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/83716672/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index cacb798..961af55 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -467,6 +467,7 @@ Em.I18n.translations = {
   'popup.dependent.configs.header': 'Dependent Configurations',
   'popup.dependent.configs.title.recommendation': 'Based on your configuration changes, Ambari is recommending the following dependent configuration changes.',
   'popup.dependent.configs.title.values': 'Ambari will update all checked configuration changes to the <b>Recommended Value</b>. Uncheck any configuration to retain the <b>Current Value</b>.',
+  'popup.dependent.configs.title.required': 'The following configuration changes are required and will be applied automatically.',
   'popup.dependent.configs.table.recommended': 'Recommended Changes',
   'popup.dependent.configs.table.required': 'Required Changes',
   'popup.dependent.configs.table.saveProperty': 'Save property',

http://git-wip-us.apache.org/repos/asf/ambari/blob/83716672/ambari-web/app/templates/common/modal_popups/dependent_configs_list.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/common/modal_popups/dependent_configs_list.hbs b/ambari-web/app/templates/common/modal_popups/dependent_configs_list.hbs
index af84306..c53e29d 100644
--- a/ambari-web/app/templates/common/modal_popups/dependent_configs_list.hbs
+++ b/ambari-web/app/templates/common/modal_popups/dependent_configs_list.hbs
@@ -16,17 +16,15 @@
 * limitations under the License.
 }}
 
-{{#if view.recommendations.length}}
-  <div class="alert alert-warning">
-    {{#if view.isAfterRecommendation}}
-      <div>{{t popup.dependent.configs.title.recommendation}}</div>
-    {{/if}}
-    <div>{{t popup.dependent.configs.title.values}}</div>
-  </div>
-{{/if}}
 <span id="config-dependencies" class="limited-height-2">
   {{#if view.recommendations.length}}
     <h4>{{t popup.dependent.configs.table.recommended}}</h4>
+    <div class="alert alert-warning">
+      {{#if view.isAfterRecommendation}}
+        <div>{{t popup.dependent.configs.title.recommendation}}</div>
+      {{/if}}
+      <div>{{t popup.dependent.configs.title.values}}</div>
+    </div>
     <table class="table table-hover">
       <thead>
         <tr>
@@ -70,6 +68,9 @@
   {{/if}}
   {{#if view.requiredChanges.length}}
     <h4>{{t popup.dependent.configs.table.required}}</h4>
+    <div class="alert alert-warning">
+      {{t popup.dependent.configs.title.required}}
+    </div>
     <table class="table table-hover">
       <thead>
         <tr>


[33/50] [abbrv] ambari git commit: AMBARI-19337. Ambari has some spelling mistakes in YARN proxyuser properties in many places (Jay SenSharma via smohanty)

Posted by nc...@apache.org.
AMBARI-19337. Ambari has some spelling mistakes in YARN proxyuser properties in many places (Jay SenSharma via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4d44269f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4d44269f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4d44269f

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 4d44269f1aff07bc09cc0677bf9b626a62b93d61
Parents: b8ef3ad
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Tue Jan 17 13:35:00 2017 -0800
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Tue Jan 17 13:35:00 2017 -0800

----------------------------------------------------------------------
 .../common-services/YARN/2.1.0.2.0/kerberos.json        | 12 ++++++------
 .../common-services/YARN/3.0.0.3.0/kerberos.json        | 12 ++++++------
 .../stacks/HDP/2.2/services/YARN/kerberos.json          | 12 ++++++------
 .../stacks/HDP/2.3.ECS/services/YARN/kerberos.json      | 12 ++++++------
 .../stacks/HDP/2.3/services/YARN/kerberos.json          | 12 ++++++------
 .../stacks/HDP/2.5/services/YARN/kerberos.json          | 12 ++++++------
 .../stacks/PERF/1.0/services/YARN/kerberos.json         | 12 ++++++------
 .../stacks/2.2/configs/pig-service-check-secure.json    | 12 ++++++------
 .../kerberos/test_kerberos_descriptor_2_1_3.json        | 12 ++++++------
 .../assets/data/stacks/HDP-2.1/service_components.json  | 12 ++++++------
 ambari-web/app/data/configs/wizards/secure_mapping.js   | 12 ++++++------
 11 files changed, 66 insertions(+), 66 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4d44269f/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
index 6b61c13..c8b5989 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
@@ -23,13 +23,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxyuser.*.groups": "",
+            "yarn.resourcemanager.proxyuser.*.hosts": "",
+            "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d44269f/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
index d334887..fb85e7a 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
@@ -24,13 +24,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxyuser.*.groups": "",
+            "yarn.resourcemanager.proxyuser.*.hosts": "",
+            "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d44269f/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
index ad30b76..85a3221 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
@@ -23,13 +23,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxyuser.*.groups": "",
+            "yarn.resourcemanager.proxyuser.*.hosts": "",
+            "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore-secure",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d44269f/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
index 7977941..e27513a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
@@ -26,13 +26,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxyuser.*.groups": "",
+            "yarn.resourcemanager.proxyuser.*.hosts": "",
+            "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d44269f/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
index 73addb1..bf0280b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
@@ -24,13 +24,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxyuser.*.groups": "",
+            "yarn.resourcemanager.proxyuser.*.hosts": "",
+            "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d44269f/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
index d334887..fb85e7a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
@@ -24,13 +24,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxyuser.*.groups": "",
+            "yarn.resourcemanager.proxyuser.*.hosts": "",
+            "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d44269f/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json
index 7e74237..2735323 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json
@@ -24,13 +24,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxyuser.*.groups": "",
+            "yarn.resourcemanager.proxyuser.*.hosts": "",
+            "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore-secure"
           }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d44269f/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json b/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
index f14eb52..0ac9e78 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
@@ -402,7 +402,7 @@
             "yarn.log-aggregation-enable": "true", 
             "yarn.nodemanager.delete.debug-delay-sec": "0", 
             "yarn.timeline-service.store-class": "org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore", 
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "", 
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
             "yarn.timeline-service.client.retry-interval-ms": "1000", 
             "hadoop.registry.zk.quorum": "c6402.ambari.apache.org:2181,c6403.ambari.apache.org:2181,c6401.ambari.apache.org:2181", 
             "yarn.nodemanager.aux-services": "mapreduce_shuffle", 
@@ -424,7 +424,7 @@
             "yarn.nodemanager.resource.memory-mb": "2048", 
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "", 
             "yarn.nodemanager.resource.cpu-vcores": "1", 
-            "yarn.resourcemanager.proxyusers.*.users": "", 
+            "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.timeline-service.ttl-ms": "2678400000", 
             "yarn.nodemanager.resource.percentage-physical-cpu-limit": "100", 
             "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000", 
@@ -433,7 +433,7 @@
             "yarn.nodemanager.log.retain-seconds": "604800",
             "yarn.timeline-service.http-authentication.type": "kerberos", 
             "yarn.nodemanager.log-dirs": "/hadoop/yarn/log", 
-            "yarn.resourcemanager.proxyusers.*.groups": "", 
+            "yarn.resourcemanager.proxyuser.*.groups": "",
             "yarn.timeline-service.client.max-retries": "30", 
             "yarn.nodemanager.health-checker.interval-ms": "135000", 
             "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX", 
@@ -445,7 +445,7 @@
             "yarn.client.nodemanager-connect.max-wait-ms": "60000", 
             "yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true", 
             "yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000", 
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "", 
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
             "yarn.timeline-service.http-authentication.signer.secret.provider": "", 
             "yarn.resourcemanager.bind-host": "0.0.0.0", 
             "yarn.http.policy": "HTTP_ONLY", 
@@ -463,7 +463,7 @@
             "hadoop.registry.rm.enabled": "false", 
             "yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms": "300000", 
             "yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500", 
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "", 
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
             "yarn.nodemanager.log-aggregation.compression-type": "gz", 
             "yarn.timeline-service.http-authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
             "yarn.nodemanager.log-aggregation.num-log-files-per-app": "30", 
@@ -478,7 +478,7 @@
             "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore", 
             "yarn.resourcemanager.connect.retry-interval.ms": "30000", 
             "yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000", 
-            "yarn.resourcemanager.proxyusers.*.hosts": ""
+            "yarn.resourcemanager.proxyuser.*.hosts": ""
         }, 
         "capacity-scheduler": {
             "yarn.scheduler.capacity.default.minimum-user-limit-percent": "100", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d44269f/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json b/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json
index bcc5359..3d0dc28 100644
--- a/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json
+++ b/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json
@@ -796,7 +796,7 @@
     }, {
       "yarn-site": {
         "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
-        "yarn.resourcemanager.proxyusers.*.users": "",
+        "yarn.resourcemanager.proxyuser.*.users": "",
         "yarn.timeline-service.http-authentication.token.validity": "",
         "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
         "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
@@ -805,14 +805,14 @@
         "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
         "yarn.acl.enable": "true",
         "yarn.timeline-service.http-authentication.signer.secret.provider": "",
-        "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
-        "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+        "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
+        "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
         "yarn.timeline-service.http-authentication.signature.secret": "",
         "yarn.timeline-service.http-authentication.signature.secret.file": "",
-        "yarn.resourcemanager.proxyusers.*.hosts": "",
-        "yarn.resourcemanager.proxyusers.*.groups": "",
+        "yarn.resourcemanager.proxyuser.*.hosts": "",
+        "yarn.resourcemanager.proxyuser.*.groups": "",
         "yarn.timeline-service.enabled": "true",
-        "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+        "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
         "yarn.timeline-service.http-authentication.cookie.domain": ""
       }
     }, {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d44269f/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json b/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json
index 147c1c0..0a8f20b 100644
--- a/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json
+++ b/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json
@@ -2893,7 +2893,7 @@
             {
               "yarn-site" : {
                 "yarn.timeline-service.http-authentication.signer.secret.provider.object" : "",
-                "yarn.resourcemanager.proxyusers.*.users" : "",
+                "yarn.resourcemanager.proxyuser.*.users" : "",
                 "yarn.timeline-service.http-authentication.token.validity" : "",
                 "yarn.timeline-service.http-authentication.kerberos.name.rules" : "",
                 "yarn.timeline-service.http-authentication.cookie.path" : "",
@@ -2901,14 +2901,14 @@
                 "yarn.resourcemanager.proxy-user-privileges.enabled" : "true",
                 "yarn.acl.enable" : "true",
                 "yarn.timeline-service.http-authentication.signer.secret.provider" : "",
-                "yarn.timeline-service.http-authentication.proxyusers.*.groups" : "",
-                "yarn.timeline-service.http-authentication.proxyusers.*.hosts" : "",
+                "yarn.timeline-service.http-authentication.proxyuser.*.groups" : "",
+                "yarn.timeline-service.http-authentication.proxyuser.*.hosts" : "",
                 "yarn.timeline-service.http-authentication.signature.secret" : "",
                 "yarn.timeline-service.http-authentication.signature.secret.file" : "",
-                "yarn.resourcemanager.proxyusers.*.hosts" : "",
-                "yarn.resourcemanager.proxyusers.*.groups" : "",
+                "yarn.resourcemanager.proxyuser.*.hosts" : "",
+                "yarn.resourcemanager.proxyuser.*.groups" : "",
                 "yarn.timeline-service.enabled" : "false",
-                "yarn.timeline-service.http-authentication.proxyusers.*.users" : "",
+                "yarn.timeline-service.http-authentication.proxyuser.*.users" : "",
                 "yarn.timeline-service.http-authentication.cookie.domain" : ""
               }
             }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d44269f/ambari-web/app/data/configs/wizards/secure_mapping.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/configs/wizards/secure_mapping.js b/ambari-web/app/data/configs/wizards/secure_mapping.js
index 8d952e8..2d24628 100644
--- a/ambari-web/app/data/configs/wizards/secure_mapping.js
+++ b/ambari-web/app/data/configs/wizards/secure_mapping.js
@@ -854,7 +854,7 @@ var yarn22Mapping = [
     "filename": "yarn-site.xml"
   },
   {
-    "name": 'yarn.timeline-service.http-authentication.proxyusers.*.hosts',
+    "name": 'yarn.timeline-service.http-authentication.proxyuser.*.hosts',
     "value": "",
     "templateName": [],
     "foreignKey": null,
@@ -862,7 +862,7 @@ var yarn22Mapping = [
     "filename": "yarn-site.xml"
   },
   {
-    "name": 'yarn.timeline-service.http-authentication.proxyusers.*.users',
+    "name": 'yarn.timeline-service.http-authentication.proxyuser.*.users',
     "value": "",
     "serviceName": "YARN",
     "templateName": [],
@@ -870,7 +870,7 @@ var yarn22Mapping = [
     "filename": "yarn-site.xml"
   },
   {
-    "name": 'yarn.timeline-service.http-authentication.proxyusers.*.groups',
+    "name": 'yarn.timeline-service.http-authentication.proxyuser.*.groups',
     "value": "",
     "templateName": [],
     "foreignKey": null,
@@ -958,7 +958,7 @@ var yarn22Mapping = [
     "filename": "yarn-site.xml"
   },
   {
-    "name": 'yarn.resourcemanager.proxyusers.*.hosts',
+    "name": 'yarn.resourcemanager.proxyuser.*.hosts',
     "value": "",
     "templateName": [],
     "foreignKey": null,
@@ -966,7 +966,7 @@ var yarn22Mapping = [
     "filename": "yarn-site.xml"
   },
   {
-    "name": 'yarn.resourcemanager.proxyusers.*.users',
+    "name": 'yarn.resourcemanager.proxyuser.*.users',
     "value": "",
     "templateName": [],
     "foreignKey": null,
@@ -974,7 +974,7 @@ var yarn22Mapping = [
     "filename": "yarn-site.xml"
   },
   {
-    "name": 'yarn.resourcemanager.proxyusers.*.groups',
+    "name": 'yarn.resourcemanager.proxyuser.*.groups',
     "value": "",
     "templateName": [],
     "foreignKey": null,


[22/50] [abbrv] ambari git commit: AMBARI-19337. Ambari has some spelling mistakes in YARN proxyuser properties in many places (Jay SenSharma via smohanty)

Posted by nc...@apache.org.
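
As the subject says, the plural "proxyusers" segment was a typo: Hadoop-style impersonation settings are read under the singular "proxyuser" prefix (hadoop.proxyuser.<user>.hosts and friends), so the misspelled keys would presumably never be picked up. A minimal sanity check, assuming a hypothetical list of descriptor paths, could look like:

    import json

    # Illustrative check, not part of this commit: walk a descriptor's nested
    # keys and assert the misspelled plural segment is gone.
    def all_keys(obj):
        if isinstance(obj, dict):
            for key, value in obj.items():
                yield key
                yield from all_keys(value)
        elif isinstance(obj, list):
            for item in obj:
                yield from all_keys(item)

    descriptor_paths = ["kerberos.json"]  # hypothetical paths to the files above
    for path in descriptor_paths:
        with open(path) as f:
            assert not any(".proxyusers." in key
                           for key in all_keys(json.load(f))), path
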
http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-web/app/data/configs/wizards/secure_mapping.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/configs/wizards/secure_mapping.js b/ambari-web/app/data/configs/wizards/secure_mapping.js
index 8d952e8..2d24628 100644
--- a/ambari-web/app/data/configs/wizards/secure_mapping.js
+++ b/ambari-web/app/data/configs/wizards/secure_mapping.js
@@ -854,7 +854,7 @@ var yarn22Mapping = [
     "filename": "yarn-site.xml"
   },
   {
-    "name": 'yarn.timeline-service.http-authentication.proxyusers.*.hosts',
+    "name": 'yarn.timeline-service.http-authentication.proxyuser.*.hosts',
     "value": "",
     "templateName": [],
     "foreignKey": null,
@@ -862,7 +862,7 @@ var yarn22Mapping = [
     "filename": "yarn-site.xml"
   },
   {
-    "name": 'yarn.timeline-service.http-authentication.proxyusers.*.users',
+    "name": 'yarn.timeline-service.http-authentication.proxyuser.*.users',
     "value": "",
     "serviceName": "YARN",
     "templateName": [],
@@ -870,7 +870,7 @@ var yarn22Mapping = [
     "filename": "yarn-site.xml"
   },
   {
-    "name": 'yarn.timeline-service.http-authentication.proxyusers.*.groups',
+    "name": 'yarn.timeline-service.http-authentication.proxyuser.*.groups',
     "value": "",
     "templateName": [],
     "foreignKey": null,
@@ -958,7 +958,7 @@ var yarn22Mapping = [
     "filename": "yarn-site.xml"
   },
   {
-    "name": 'yarn.resourcemanager.proxyusers.*.hosts',
+    "name": 'yarn.resourcemanager.proxyuser.*.hosts',
     "value": "",
     "templateName": [],
     "foreignKey": null,
@@ -966,7 +966,7 @@ var yarn22Mapping = [
     "filename": "yarn-site.xml"
   },
   {
-    "name": 'yarn.resourcemanager.proxyusers.*.users',
+    "name": 'yarn.resourcemanager.proxyuser.*.users',
     "value": "",
     "templateName": [],
     "foreignKey": null,
@@ -974,7 +974,7 @@ var yarn22Mapping = [
     "filename": "yarn-site.xml"
   },
   {
-    "name": 'yarn.resourcemanager.proxyusers.*.groups',
+    "name": 'yarn.resourcemanager.proxyuser.*.groups',
     "value": "",
     "templateName": [],
     "foreignKey": null,


[29/50] [abbrv] ambari git commit: Revert "AMBARI-19337. Ambari has some spelling mistakes in YARN proxyuser properties in many places (Jay SenSharma via smohanty)"

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json.orig
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json.orig b/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json.orig
deleted file mode 100644
index bcc5359..0000000
--- a/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json.orig
+++ /dev/null
@@ -1,1320 +0,0 @@
-{
-  "identities": [{
-    "principal": {
-      "type": "service",
-      "value": "HTTP/_HOST@${realm}"
-    },
-    "name": "spnego",
-    "keytab": {
-      "owner": {
-        "access": "r",
-        "name": "root"
-      },
-      "file": "${keytab_dir}/spnego.service.keytab",
-      "group": {
-        "access": "r",
-        "name": "${cluster-env/user_group}"
-      }
-    }
-  }, {
-    "principal": {
-      "configuration": "cluster-env/smokeuser_principal_name",
-      "type": "user",
-      "local_username": "${cluster-env/smokeuser}",
-      "value": "${cluster-env/smokeuser}-${cluster_name|toLower()}@${realm}"
-    },
-    "name": "smokeuser",
-    "keytab": {
-      "owner": {
-        "access": "r",
-        "name": "${cluster-env/smokeuser}"
-      },
-      "file": "${keytab_dir}/smokeuser.headless.keytab",
-      "configuration": "cluster-env/smokeuser_keytab",
-      "group": {
-        "access": "r",
-        "name": "${cluster-env/user_group}"
-      }
-    }
-  }],
-  "services": [{
-    "components": [{
-      "name": "MAHOUT"
-    }],
-    "identities": [{
-      "name": "/smokeuser"
-    }, {
-      "name": "/HDFS/hdfs"
-    }],
-    "name": "MAHOUT"
-  }, {
-    "components": [{
-      "identities": [{
-        "principal": {
-          "configuration": "mapred-site/mapreduce.jobhistory.principal",
-          "type": "service",
-          "local_username": "${mapred-env/mapred_user}",
-          "value": "jhs/_HOST@${realm}"
-        },
-        "name": "history_server_jhs",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${mapred-env/mapred_user}"
-          },
-          "file": "${keytab_dir}/jhs.service.keytab",
-          "configuration": "mapred-site/mapreduce.jobhistory.keytab",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }, {
-        "principal": {
-          "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal",
-          "type": "service",
-          "value": "HTTP/_HOST@${realm}"
-        },
-        "name": "/spnego",
-        "keytab": {
-          "owner": {},
-          "file": "${keytab_dir}/spnego.service.keytab",
-          "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file",
-          "group": {}
-        }
-      }],
-      "name": "HISTORYSERVER"
-    }],
-    "identities": [{
-      "name": "/spnego"
-    }, {
-      "name": "/HDFS/hdfs"
-    }, {
-      "name": "/smokeuser"
-    }],
-    "name": "MAPREDUCE2"
-  }, {
-    "components": [{
-      "identities": [{
-        "principal": {
-          "configuration": "oozie-site/oozie.service.HadoopAccessorService.kerberos.principal",
-          "type": "service",
-          "local_username": "${oozie-env/oozie_user}",
-          "value": "oozie/_HOST@${realm}"
-        },
-        "name": "oozie_server",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${oozie-env/oozie_user}"
-          },
-          "file": "${keytab_dir}/oozie.service.keytab",
-          "configuration": "oozie-site/oozie.service.HadoopAccessorService.keytab.file",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }, {
-        "principal": {
-          "configuration": "oozie-site/oozie.authentication.kerberos.principal",
-          "type": "service"
-        },
-        "name": "/spnego",
-        "keytab": {
-          "owner": {},
-          "configuration": "oozie-site/oozie.authentication.kerberos.keytab",
-          "group": {}
-        }
-      }],
-      "name": "OOZIE_SERVER"
-    }],
-    "identities": [{
-      "name": "/spnego"
-    }, {
-      "name": "/smokeuser"
-    }, {
-      "name": "/HDFS/hdfs"
-    }],
-    "auth_to_local_properties": [
-      "oozie-site/oozie.authentication.kerberos.name.rules"
-    ],
-    "configurations": [{
-      "oozie-site": {
-        "oozie.service.HadoopAccessorService.kerberos.enabled": "true",
-        "oozie.authentication.type": "kerberos",
-        "oozie.service.AuthorizationService.authorization.enabled": "true",
-        "local.realm": "${realm}",
-        "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials,hive2=org.apache.oozie.action.hadoop.Hive2Credentials"
-      }
-    }],
-    "name": "OOZIE"
-  }, {
-    "components": [{
-      "identities": [{
-        "principal": {
-          "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.principal",
-          "type": "service",
-          "local_username": "${hadoop-env/hdfs_user}",
-          "value": "nn/_HOST@${realm}"
-        },
-        "name": "secondary_namenode_nn",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${hadoop-env/hdfs_user}"
-          },
-          "file": "${keytab_dir}/nn.service.keytab",
-          "configuration": "hdfs-site/dfs.secondary.namenode.keytab.file",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }, {
-        "principal": {
-          "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.internal.spnego.principal",
-          "type": "service",
-          "value": "HTTP/_HOST@${realm}"
-        },
-        "name": "/spnego"
-      }],
-      "name": "SECONDARY_NAMENODE"
-    }, {
-      "identities": [{
-        "principal": {
-          "configuration": "hdfs-site/dfs.datanode.kerberos.principal",
-          "type": "service",
-          "local_username": "${hadoop-env/hdfs_user}",
-          "value": "dn/_HOST@${realm}"
-        },
-        "name": "datanode_dn",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${hadoop-env/hdfs_user}"
-          },
-          "file": "${keytab_dir}/dn.service.keytab",
-          "configuration": "hdfs-site/dfs.datanode.keytab.file",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }],
-      "configurations": [{
-        "hdfs-site": {
-          "dfs.datanode.address": "0.0.0.0:1019",
-          "dfs.datanode.http.address": "0.0.0.0:1022"
-        }
-      }],
-      "name": "DATANODE"
-    }, {
-      "identities": [{
-        "principal": {
-          "configuration": "hdfs-site/nfs.kerberos.principal",
-          "type": "service",
-          "local_username": "${hadoop-env/hdfs_user}",
-          "value": "nfs/_HOST@${realm}"
-        },
-        "name": "nfsgateway",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${hadoop-env/hdfs_user}"
-          },
-          "file": "${keytab_dir}/nfs.service.keytab",
-          "configuration": "hdfs-site/nfs.keytab.file",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }],
-      "name": "NFS_GATEWAY"
-    }, {
-      "identities": [{
-        "principal": {
-          "configuration": "hdfs-site/dfs.journalnode.kerberos.principal",
-          "type": "service",
-          "local_username": "${hadoop-env/hdfs_user}",
-          "value": "jn/_HOST@${realm}"
-        },
-        "name": "journalnode_jn",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${hadoop-env/hdfs_user}"
-          },
-          "file": "${keytab_dir}/jn.service.keytab",
-          "configuration": "hdfs-site/dfs.journalnode.keytab.file",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }, {
-        "principal": {
-          "configuration": "hdfs-site/dfs.journalnode.kerberos.internal.spnego.principal",
-          "type": "service",
-          "value": "HTTP/_HOST@${realm}"
-        },
-        "name": "/spnego"
-      }],
-      "name": "JOURNALNODE"
-    }, {
-      "identities": [{
-        "principal": {
-          "configuration": "hdfs-site/dfs.namenode.kerberos.principal",
-          "type": "service",
-          "local_username": "${hadoop-env/hdfs_user}",
-          "value": "nn/_HOST@${realm}"
-        },
-        "name": "namenode_nn",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${hadoop-env/hdfs_user}"
-          },
-          "file": "${keytab_dir}/nn.service.keytab",
-          "configuration": "hdfs-site/dfs.namenode.keytab.file",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }, {
-        "principal": {
-          "configuration": "hdfs-site/dfs.namenode.kerberos.internal.spnego.principal",
-          "type": "service",
-          "value": "HTTP/_HOST@${realm}"
-        },
-        "name": "/spnego"
-      }],
-      "configurations": [{
-        "hdfs-site": {
-          "dfs.block.access.token.enable": "true"
-        }
-      }],
-      "name": "NAMENODE"
-    }],
-    "identities": [{
-      "principal": {
-        "configuration": "hdfs-site/dfs.web.authentication.kerberos.principal",
-        "type": "service",
-        "value": "HTTP/_HOST@${realm}"
-      },
-      "name": "/spnego",
-      "keytab": {
-        "owner": {},
-        "file": "${keytab_dir}/spnego.service.keytab",
-        "configuration": "hdfs-site/dfs.web.authentication.kerberos.keytab",
-        "group": {}
-      }
-    }, {
-      "name": "/smokeuser"
-    }, {
-      "principal": {
-        "configuration": "hadoop-env/hdfs_principal_name",
-        "type": "user",
-        "local_username": "${hadoop-env/hdfs_user}",
-        "value": "${hadoop-env/hdfs_user}-${cluster_name|toLower()}@${realm}"
-      },
-      "name": "hdfs",
-      "keytab": {
-        "owner": {
-          "access": "r",
-          "name": "${hadoop-env/hdfs_user}"
-        },
-        "file": "${keytab_dir}/hdfs.headless.keytab",
-        "configuration": "hadoop-env/hdfs_user_keytab",
-        "group": {
-          "access": "r",
-          "name": "${cluster-env/user_group}"
-        }
-      }
-    }],
-    "auth_to_local_properties": [
-      "core-site/hadoop.security.auth_to_local"
-    ],
-    "configurations": [{
-      "core-site": {
-        "hadoop.security.authorization": "true",
-        "hadoop.security.authentication": "kerberos",
-        "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}"
-      }
-    }],
-    "name": "HDFS"
-  }, {
-    "components": [{
-      "configurations": [{
-        "tez-site": {
-          "tez.am.view-acls": ""
-        }
-      }],
-      "name": "TEZ_CLIENT"
-    }],
-    "name": "TEZ"
-  }, {
-    "components": [{
-      "name": "SPARK_CLIENT"
-    }, {
-      "name": "SPARK_JOBHISTORYSERVER"
-    }],
-    "identities": [{
-      "name": "/smokeuser"
-    }, {
-      "name": "/HDFS/hdfs"
-    }, {
-      "principal": {
-        "configuration": "spark-defaults/spark.history.kerberos.principal",
-        "type": "user",
-        "local_username": "${spark-env/spark_user}",
-        "value": "${spark-env/spark_user}-${cluster_name|toLower()}@${realm}"
-      },
-      "name": "sparkuser",
-      "keytab": {
-        "owner": {
-          "access": "r",
-          "name": "${spark-env/spark_user}"
-        },
-        "file": "${keytab_dir}/spark.headless.keytab",
-        "configuration": "spark-defaults/spark.history.kerberos.keytab",
-        "group": {
-          "access": "",
-          "name": "${cluster-env/user_group}"
-        }
-      }
-    }],
-    "configurations": [{
-      "spark-defaults": {
-        "spark.history.kerberos.enabled": "true"
-      }
-    }],
-    "name": "SPARK"
-  }, {
-    "components": [{
-      "name": "ACCUMULO_MASTER"
-    }, {
-      "name": "ACCUMULO_MONITOR"
-    }, {
-      "name": "ACCUMULO_CLIENT"
-    }, {
-      "name": "ACCUMULO_TRACER"
-    }, {
-      "name": "ACCUMULO_TSERVER"
-    }, {
-      "name": "ACCUMULO_GC"
-    }],
-    "identities": [{
-      "principal": {
-        "configuration": "accumulo-env/accumulo_principal_name",
-        "type": "user",
-        "local_username": "${accumulo-env/accumulo_user}",
-        "value": "${accumulo-env/accumulo_user}-${cluster_name|toLower()}@${realm}"
-      },
-      "name": "accumulo",
-      "keytab": {
-        "owner": {
-          "access": "r",
-          "name": "${accumulo-env/accumulo_user}"
-        },
-        "file": "${keytab_dir}/accumulo.headless.keytab",
-        "configuration": "accumulo-env/accumulo_user_keytab",
-        "group": {
-          "access": "r",
-          "name": "${cluster-env/user_group}"
-        }
-      }
-    }, {
-      "principal": {
-        "configuration": "accumulo-site/general.kerberos.principal",
-        "type": "service",
-        "local_username": "${accumulo-env/accumulo_user}",
-        "value": "${accumulo-env/accumulo_user}/_HOST@${realm}"
-      },
-      "name": "accumulo_service",
-      "keytab": {
-        "owner": {
-          "access": "r",
-          "name": "${accumulo-env/accumulo_user}"
-        },
-        "file": "${keytab_dir}/accumulo.service.keytab",
-        "configuration": "accumulo-site/general.kerberos.keytab",
-        "group": {
-          "access": "",
-          "name": "${cluster-env/user_group}"
-        }
-      }
-    }, {
-      "principal": {
-        "configuration": "accumulo-site/trace.user",
-        "type": "user",
-        "local_username": "${accumulo-env/accumulo_user}",
-        "value": "tracer-${cluster_name|toLower()}@${realm}"
-      },
-      "name": "accumulo_tracer",
-      "keytab": {
-        "owner": {
-          "access": "r",
-          "name": "${accumulo-env/accumulo_user}"
-        },
-        "file": "${keytab_dir}/accumulo-tracer.headless.keytab",
-        "configuration": "accumulo-site/trace.token.property.keytab",
-        "group": {
-          "access": "",
-          "name": "${cluster-env/user_group}"
-        }
-      }
-    }, {
-      "name": "/HDFS/hdfs"
-    }, {
-      "name": "/smokeuser"
-    }],
-    "configurations": [{
-      "accumulo-site": {
-        "instance.security.authenticator": "org.apache.accumulo.server.security.handler.KerberosAuthenticator",
-        "instance.rpc.sasl.enabled": "true",
-        "general.delegation.token.lifetime": "7d",
-        "trace.token.type": "org.apache.accumulo.core.client.security.tokens.KerberosToken",
-        "instance.security.permissionHandler": "org.apache.accumulo.server.security.handler.KerberosPermissionHandler",
-        "general.delegation.token.update.interval": "1d",
-        "instance.security.authorizor": "org.apache.accumulo.server.security.handler.KerberosAuthorizor"
-      }
-    }],
-    "name": "ACCUMULO"
-  }, {
-    "components": [{
-      "identities": [{
-        "principal": {
-          "configuration": "zookeeper-env/zookeeper_principal_name",
-          "type": "service",
-          "value": "zookeeper/_HOST@${realm}"
-        },
-        "name": "zookeeper_zk",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${zookeeper-env/zk_user}"
-          },
-          "file": "${keytab_dir}/zk.service.keytab",
-          "configuration": "zookeeper-env/zookeeper_keytab_path",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }],
-      "name": "ZOOKEEPER_SERVER"
-    }],
-    "identities": [{
-      "name": "/smokeuser"
-    }],
-    "name": "ZOOKEEPER"
-  }, {
-    "components": [{
-      "identities": [{
-        "principal": {
-          "configuration": "hbase-site/hbase.regionserver.kerberos.principal",
-          "type": "service",
-          "local_username": "${hbase-env/hbase_user}",
-          "value": "hbase/_HOST@${realm}"
-        },
-        "name": "hbase_regionserver_hbase",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${hbase-env/hbase_user}"
-          },
-          "file": "${keytab_dir}/hbase.service.keytab",
-          "configuration": "hbase-site/hbase.regionserver.keytab.file",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }],
-      "name": "HBASE_REGIONSERVER"
-    }, {
-      "identities": [{
-        "principal": {
-          "configuration": "hbase-site/hbase.master.kerberos.principal",
-          "type": "service",
-          "local_username": "${hbase-env/hbase_user}",
-          "value": "hbase/_HOST@${realm}"
-        },
-        "name": "hbase_master_hbase",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${hbase-env/hbase_user}"
-          },
-          "file": "${keytab_dir}/hbase.service.keytab",
-          "configuration": "hbase-site/hbase.master.keytab.file",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }],
-      "name": "HBASE_MASTER"
-    }, {
-      "identities": [{
-        "principal": {
-          "configuration": "hbase-site/phoenix.queryserver.kerberos.principal",
-          "type": "service",
-          "local_username": "${hbase-env/hbase_user}",
-          "value": "hbase/_HOST@${realm}"
-        },
-        "name": "hbase_queryserver_hbase",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${hbase-env/hbase_user}"
-          },
-          "file": "${keytab_dir}/hbase.service.keytab",
-          "configuration": "hbase-site/phoenix.queryserver.keytab.file",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }],
-      "name": "PHOENIX_QUERY_SERVER"
-    }],
-    "identities": [{
-      "name": "/spnego"
-    }, {
-      "name": "/HDFS/hdfs"
-    }, {
-      "principal": {
-        "configuration": "hbase-env/hbase_principal_name",
-        "type": "user",
-        "local_username": "${hbase-env/hbase_user}",
-        "value": "${hbase-env/hbase_user}-${cluster_name|toLower()}@${realm}"
-      },
-      "name": "hbase",
-      "keytab": {
-        "owner": {
-          "access": "r",
-          "name": "${hbase-env/hbase_user}"
-        },
-        "file": "${keytab_dir}/hbase.headless.keytab",
-        "configuration": "hbase-env/hbase_user_keytab",
-        "group": {
-          "access": "r",
-          "name": "${cluster-env/user_group}"
-        }
-      }
-    }, {
-      "name": "/smokeuser"
-    }],
-    "configurations": [{
-      "hbase-site": {
-        "hbase.coprocessor.master.classes": "{{hbase_coprocessor_master_classes}}",
-        "hbase.security.authentication": "kerberos",
-        "hbase.coprocessor.region.classes": "{{hbase_coprocessor_region_classes}}",
-        "hbase.security.authorization": "true",
-        "hbase.bulkload.staging.dir": "/apps/hbase/staging",
-        "zookeeper.znode.parent": "/hbase-secure"
-      }
-    }],
-    "name": "HBASE"
-  }, {
-    "components": [{
-      "name": "KERBEROS_CLIENT"
-    }],
-    "identities": [{
-      "name": "/smokeuser"
-    }],
-    "name": "KERBEROS"
-  }, {
-    "components": [{
-      "identities": [{
-        "principal": {
-          "configuration": "kms-site/hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal",
-          "type": "service"
-        },
-        "name": "/spnego",
-        "keytab": {
-          "owner": {},
-          "configuration": "kms-site/hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab",
-          "group": {}
-        }
-      }, {
-        "name": "/smokeuser"
-      }],
-      "name": "RANGER_KMS_SERVER"
-    }],
-    "identities": [{
-      "name": "/spnego",
-      "keytab": {
-        "owner": {},
-        "configuration": "kms-site/hadoop.kms.authentication.kerberos.keytab",
-        "group": {}
-      }
-    }, {
-      "name": "/smokeuser"
-    }],
-    "configurations": [{
-      "kms-site": {
-        "hadoop.kms.authentication.kerberos.principal": "*",
-        "hadoop.kms.authentication.type": "kerberos"
-      }
-    }],
-    "name": "RANGER_KMS"
-  }, {
-    "components": [{
-      "identities": [{
-        "principal": {
-          "configuration": "yarn-site/yarn.nodemanager.principal",
-          "type": "service",
-          "local_username": "${yarn-env/yarn_user}",
-          "value": "nm/_HOST@${realm}"
-        },
-        "name": "nodemanager_nm",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${yarn-env/yarn_user}"
-          },
-          "file": "${keytab_dir}/nm.service.keytab",
-          "configuration": "yarn-site/yarn.nodemanager.keytab",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }, {
-        "principal": {
-          "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal",
-          "type": "service",
-          "value": "HTTP/_HOST@${realm}"
-        },
-        "name": "/spnego",
-        "keytab": {
-          "owner": {},
-          "file": "${keytab_dir}/spnego.service.keytab",
-          "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file",
-          "group": {}
-        }
-      }],
-      "configurations": [{
-        "yarn-site": {
-          "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
-        }
-      }],
-      "name": "NODEMANAGER"
-    }, {
-      "identities": [{
-        "principal": {
-          "configuration": "yarn-site/yarn.timeline-service.principal",
-          "type": "service",
-          "local_username": "${yarn-env/yarn_user}",
-          "value": "yarn/_HOST@${realm}"
-        },
-        "name": "app_timeline_server_yarn",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${yarn-env/yarn_user}"
-          },
-          "file": "${keytab_dir}/yarn.service.keytab",
-          "configuration": "yarn-site/yarn.timeline-service.keytab",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }, {
-        "principal": {
-          "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal",
-          "type": "service",
-          "value": "HTTP/_HOST@${realm}"
-        },
-        "name": "/spnego",
-        "keytab": {
-          "owner": {},
-          "file": "${keytab_dir}/spnego.service.keytab",
-          "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab",
-          "group": {}
-        }
-      }],
-      "name": "APP_TIMELINE_SERVER"
-    }, {
-      "identities": [{
-        "principal": {
-          "configuration": "yarn-site/yarn.resourcemanager.principal",
-          "type": "service",
-          "local_username": "${yarn-env/yarn_user}",
-          "value": "rm/_HOST@${realm}"
-        },
-        "name": "resource_manager_rm",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${yarn-env/yarn_user}"
-          },
-          "file": "${keytab_dir}/rm.service.keytab",
-          "configuration": "yarn-site/yarn.resourcemanager.keytab",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }, {
-        "principal": {
-          "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal",
-          "type": "service",
-          "value": "HTTP/_HOST@${realm}"
-        },
-        "name": "/spnego",
-        "keytab": {
-          "owner": {},
-          "file": "${keytab_dir}/spnego.service.keytab",
-          "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file",
-          "group": {}
-        }
-      }],
-      "name": "RESOURCEMANAGER"
-    }],
-    "identities": [{
-      "name": "/spnego"
-    }, {
-      "name": "/HDFS/hdfs"
-    }, {
-      "name": "/smokeuser"
-    }],
-    "configurations": [{
-      "capacity-scheduler": {
-        "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
-        "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
-        "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
-        "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
-        "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
-      }
-    }, {
-      "yarn-site": {
-        "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
-        "yarn.resourcemanager.proxyusers.*.users": "",
-        "yarn.timeline-service.http-authentication.token.validity": "",
-        "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
-        "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-        "yarn.timeline-service.http-authentication.cookie.path": "",
-        "yarn.timeline-service.http-authentication.type": "kerberos",
-        "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-        "yarn.acl.enable": "true",
-        "yarn.timeline-service.http-authentication.signer.secret.provider": "",
-        "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
-        "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-        "yarn.timeline-service.http-authentication.signature.secret": "",
-        "yarn.timeline-service.http-authentication.signature.secret.file": "",
-        "yarn.resourcemanager.proxyusers.*.hosts": "",
-        "yarn.resourcemanager.proxyusers.*.groups": "",
-        "yarn.timeline-service.enabled": "true",
-        "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-        "yarn.timeline-service.http-authentication.cookie.domain": ""
-      }
-    }, {
-      "core-site": {
-        "hadoop.proxyuser.yarn.groups": "*",
-        "hadoop.proxyuser.yarn.hosts": "${yarn-site/yarn.resourcemanager.hostname}"
-      }
-    }],
-    "name": "YARN"
-  }, {
-    "components": [{
-      "identities": [{
-        "principal": {
-          "configuration": "knox-env/knox_principal_name",
-          "type": "service",
-          "local_username": "${knox-env/knox_user}",
-          "value": "${knox-env/knox_user}/_HOST@${realm}"
-        },
-        "name": "knox_principal",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${knox-env/knox_user}"
-          },
-          "file": "${keytab_dir}/knox.service.keytab",
-          "configuration": "knox-env/knox_keytab_path",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }],
-      "configurations": [{
-        "oozie-site": {
-          "oozie.service.ProxyUserService.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
-          "oozie.service.ProxyUserService.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
-        }
-      }, {
-        "webhcat-site": {
-          "webhcat.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
-          "webhcat.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
-        }
-      }, {
-        "gateway-site": {
-          "gateway.hadoop.kerberos.secured": "true",
-          "java.security.krb5.conf": "/etc/krb5.conf"
-        }
-      }, {
-        "core-site": {
-          "hadoop.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}",
-          "hadoop.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}"
-        }
-      }],
-      "name": "KNOX_GATEWAY"
-    }],
-    "name": "KNOX"
-  }, {
-    "components": [{
-      "identities": [{
-        "principal": {
-          "configuration": "storm-env/storm_ui_principal_name",
-          "type": "service"
-        },
-        "name": "/spnego",
-        "keytab": {
-          "owner": {},
-          "configuration": "storm-env/storm_ui_keytab",
-          "group": {}
-        }
-      }],
-      "name": "STORM_UI_SERVER"
-    }, {
-      "name": "SUPERVISOR"
-    }, {
-      "identities": [{
-        "principal": {
-          "configuration": "storm-env/nimbus_principal_name",
-          "type": "service",
-          "value": "nimbus/_HOST@${realm}"
-        },
-        "name": "nimbus_server",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${storm-env/storm_user}"
-          },
-          "file": "${keytab_dir}/nimbus.service.keytab",
-          "configuration": "storm-env/nimbus_keytab",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }],
-      "name": "NIMBUS"
-    }, {
-      "identities": [{
-        "principal": {
-          "configuration": "storm-env/nimbus_principal_name",
-          "type": "service",
-          "value": "nimbus/_HOST@${realm}"
-        },
-        "name": "nimbus_server",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${storm-env/storm_user}"
-          },
-          "file": "${keytab_dir}/nimbus.service.keytab",
-          "configuration": "storm-env/nimbus_keytab",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }],
-      "name": "DRPC_SERVER"
-    }],
-    "identities": [{
-      "name": "/spnego"
-    }, {
-      "name": "/smokeuser"
-    }, {
-      "principal": {
-        "configuration": "storm-env/storm_principal_name",
-        "type": "user",
-        "value": "${storm-env/storm_user}-${cluster_name|toLower()}@${realm}"
-      },
-      "name": "storm_components",
-      "keytab": {
-        "owner": {
-          "access": "r",
-          "name": "${storm-env/storm_user}"
-        },
-        "file": "${keytab_dir}/storm.headless.keytab",
-        "configuration": "storm-env/storm_keytab",
-        "group": {
-          "access": "",
-          "name": "${cluster-env/user_group}"
-        }
-      }
-    }],
-    "configurations": [{
-      "storm-site": {
-        "nimbus.authorizer": "backtype.storm.security.auth.authorizer.SimpleACLAuthorizer",
-        "java.security.auth.login.config": "{{conf_dir}}/storm_jaas.conf",
-        "drpc.authorizer": "backtype.storm.security.auth.authorizer.DRPCSimpleACLAuthorizer",
-        "storm.principal.tolocal": "backtype.storm.security.auth.KerberosPrincipalToLocal",
-        "storm.zookeeper.superACL": "sasl:{{storm_bare_jaas_principal}}",
-        "ui.filter.params": "{'type': 'kerberos', 'kerberos.principal': '{{storm_ui_jaas_principal}}', 'kerberos.keytab': '{{storm_ui_keytab_path}}', 'kerberos.name.rules': 'DEFAULT'}",
-        "nimbus.supervisor.users": "['{{storm_bare_jaas_principal}}']",
-        "nimbus.admins": "['{{storm_bare_jaas_principal}}']",
-        "ui.filter": "org.apache.hadoop.security.authentication.server.AuthenticationFilter",
-        "supervisor.enable": "true"
-      }
-    }],
-    "name": "STORM"
-  }, {
-    "components": [{
-      "identities": [{
-        "principal": {
-          "configuration": "application-properties/atlas.authentication.principal",
-          "type": "service",
-          "local_username": "${atlas-env/metadata_user}",
-          "value": "atlas/_HOST@${realm}"
-        },
-        "name": "atlas",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${atlas-env/metadata_user}"
-          },
-          "file": "${keytab_dir}/atlas.service.keytab",
-          "configuration": "application-properties/atlas.authentication.keytab",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }, {
-        "principal": {
-          "configuration": "application-properties/atlas.http.authentication.kerberos.principal",
-          "type": "service",
-          "value": "HTTP/_HOST@${realm}"
-        },
-        "name": "/spnego",
-        "keytab": {
-          "owner": {},
-          "configuration": "application-properties/atlas.http.authentication.kerberos.keytab",
-          "group": {}
-        }
-      }],
-      "name": "ATLAS_SERVER"
-    }],
-    "auth_to_local_properties": [
-      "application-properties/atlas.http.authentication.kerberos.name.rules|new_lines_escaped"
-    ],
-    "configurations": [{
-      "application-properties": {
-        "atlas.authentication.method": "kerberos",
-        "atlas.http.authentication.enabled": "true",
-        "atlas.http.authentication.type": "kerberos"
-      }
-    }],
-    "name": "ATLAS"
-  }, {
-    "components": [{
-      "identities": [{
-        "principal": {
-          "configuration": "hive-site/hive.server2.authentication.kerberos.principal",
-          "type": "service",
-          "local_username": "${hive-env/hive_user}",
-          "value": "hive/_HOST@${realm}"
-        },
-        "name": "hive_server_hive",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${hive-env/hive_user}"
-          },
-          "file": "${keytab_dir}/hive.service.keytab",
-          "configuration": "hive-site/hive.server2.authentication.kerberos.keytab",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }, {
-        "principal": {
-          "configuration": "hive-site/hive.server2.authentication.spnego.principal",
-          "type": "service"
-        },
-        "name": "/spnego",
-        "keytab": {
-          "owner": {},
-          "configuration": "hive-site/hive.server2.authentication.spnego.keytab",
-          "group": {}
-        }
-      }],
-      "name": "HIVE_SERVER"
-    }, {
-      "identities": [{
-        "principal": {
-          "configuration": "hive-site/hive.metastore.kerberos.principal",
-          "type": "service",
-          "local_username": "${hive-env/hive_user}",
-          "value": "hive/_HOST@${realm}"
-        },
-        "name": "hive_metastore_hive",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${hive-env/hive_user}"
-          },
-          "file": "${keytab_dir}/hive.service.keytab",
-          "configuration": "hive-site/hive.metastore.kerberos.keytab.file",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }],
-      "name": "HIVE_METASTORE"
-    }, {
-      "identities": [{
-        "principal": {
-          "configuration": "webhcat-site/templeton.kerberos.principal",
-          "type": "service"
-        },
-        "name": "/spnego",
-        "keytab": {
-          "owner": {},
-          "configuration": "webhcat-site/templeton.kerberos.keytab",
-          "group": {}
-        }
-      }],
-      "name": "WEBHCAT_SERVER"
-    }],
-    "identities": [{
-      "name": "/spnego"
-    }, {
-      "name": "/smokeuser"
-    }],
-    "configurations": [{
-      "hive-site": {
-        "hive.metastore.sasl.enabled": "true",
-        "hive.server2.authentication": "KERBEROS"
-      }
-    }, {
-      "webhcat-site": {
-        "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=${clusterHostInfo/hive_metastore_host|each(thrift://%s:9083, \\\\,, \\s*\\,\\s*)},hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/_HOST@${realm}",
-        "templeton.kerberos.secret": "secret"
-      }
-    }, {
-      "core-site": {
-        "hadoop.proxyuser.HTTP.hosts": "${clusterHostInfo/webhcat_server_host}"
-      }
-    }],
-    "name": "HIVE"
-  }, {
-    "components": [{
-      "identities": [{
-        "principal": {
-          "configuration": "ams-hbase-security-site/hbase.master.kerberos.principal",
-          "type": "service",
-          "local_username": "${ams-env/ambari_metrics_user}",
-          "value": "amshbasemaster/_HOST@${realm}"
-        },
-        "name": "ams_hbase_master_hbase",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${ams-env/ambari_metrics_user}"
-          },
-          "file": "${keytab_dir}/ams-hbase.master.keytab",
-          "configuration": "ams-hbase-security-site/hbase.master.keytab.file",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }, {
-        "principal": {
-          "configuration": "ams-hbase-security-site/hbase.regionserver.kerberos.principal",
-          "type": "service",
-          "local_username": "${ams-env/ambari_metrics_user}",
-          "value": "amshbasers/_HOST@${realm}"
-        },
-        "name": "ams_hbase_regionserver_hbase",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${ams-env/ambari_metrics_user}"
-          },
-          "file": "${keytab_dir}/ams-hbase.regionserver.keytab",
-          "configuration": "ams-hbase-security-site/hbase.regionserver.keytab.file",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }, {
-        "principal": {
-          "configuration": "ams-hbase-security-site/hbase.myclient.principal",
-          "type": "service",
-          "local_username": "${ams-env/ambari_metrics_user}",
-          "value": "amshbase/_HOST@${realm}"
-        },
-        "name": "ams_collector",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${ams-env/ambari_metrics_user}"
-          },
-          "file": "${keytab_dir}/ams.collector.keytab",
-          "configuration": "ams-hbase-security-site/hbase.myclient.keytab",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }, {
-        "principal": {
-          "configuration": "ams-hbase-security-site/ams.zookeeper.principal",
-          "type": "service",
-          "local_username": "${ams-env/ambari_metrics_user}",
-          "value": "amszk/_HOST@${realm}"
-        },
-        "name": "ams_zookeeper",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${ams-env/ambari_metrics_user}"
-          },
-          "file": "${keytab_dir}/ams-zk.service.keytab",
-          "configuration": "ams-hbase-security-site/ams.zookeeper.keytab",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }],
-      "configurations": [{
-        "ams-hbase-security-site": {
-          "hbase.coprocessor.master.classes": "org.apache.hadoop.hbase.security.access.AccessController",
-          "hadoop.security.authentication": "kerberos",
-          "hbase.security.authentication": "kerberos",
-          "hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.AccessController",
-          "hbase.security.authorization": "true",
-          "hbase.zookeeper.property.kerberos.removeRealmFromPrincipal": "true",
-          "hbase.zookeeper.property.jaasLoginRenew": "3600000",
-          "hbase.zookeeper.property.authProvider.1": "org.apache.zookeeper.server.auth.SASLAuthenticationProvider",
-          "hbase.zookeeper.property.kerberos.removeHostFromPrincipal": "true"
-        }
-      },
-        {
-          "ams-hbase-site": {
-            "zookeeper.znode.parent": "/ams-hbase-secure"
-          }
-        }
-      ],
-      "name": "METRICS_COLLECTOR"
-    }],
-    "identities": [{
-      "name": "/spnego"
-    }],
-    "name": "AMBARI_METRICS"
-  }, {
-    "components": [{
-      "identities": [{
-        "principal": {
-          "configuration": "kafka-env/kafka_principal_name",
-          "type": "service",
-          "value": "${kafka-env/kafka_user}/_HOST@${realm}"
-        },
-        "name": "kafka_broker",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${kafka-env/kafka_user}"
-          },
-          "file": "${keytab_dir}/kafka.service.keytab",
-          "configuration": "kafka-env/kafka_keytab",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }],
-      "name": "KAFKA_BROKER"
-    }],
-    "identities": [{
-      "name": "/smokeuser"
-    }],
-    "configurations": [{
-      "kafka-broker": {
-        "principal.to.local.class": "kafka.security.auth.KerberosPrincipalToLocal",
-        "authorizer.class.name": "kafka.security.auth.SimpleAclAuthorizer",
-        "super.users": "user:${kafka-env/kafka_user}",
-        "security.inter.broker.protocol": "PLAINTEXTSASL"
-      }
-    }],
-    "name": "KAFKA"
-  }, {
-    "components": [{
-      "identities": [{
-        "principal": {
-          "configuration": "falcon-startup.properties/*.falcon.service.authentication.kerberos.principal",
-          "type": "service",
-          "local_username": "${falcon-env/falcon_user}",
-          "value": "falcon/_HOST@${realm}"
-        },
-        "name": "falcon_server",
-        "keytab": {
-          "owner": {
-            "access": "r",
-            "name": "${falcon-env/falcon_user}"
-          },
-          "file": "${keytab_dir}/falcon.service.keytab",
-          "configuration": "falcon-startup.properties/*.falcon.service.authentication.kerberos.keytab",
-          "group": {
-            "access": "",
-            "name": "${cluster-env/user_group}"
-          }
-        }
-      }, {
-        "principal": {
-          "configuration": "falcon-startup.properties/*.falcon.http.authentication.kerberos.principal",
-          "type": "service",
-          "value": "HTTP/_HOST@${realm}"
-        },
-        "name": "/spnego",
-        "keytab": {
-          "owner": {},
-          "configuration": "falcon-startup.properties/*.falcon.http.authentication.kerberos.keytab",
-          "group": {}
-        }
-      }],
-      "name": "FALCON_SERVER"
-    }],
-    "identities": [{
-      "name": "/spnego"
-    }, {
-      "name": "/smokeuser"
-    }, {
-      "name": "/HDFS/hdfs"
-    }],
-    "auth_to_local_properties": [
-      "falcon-startup.properties/*.falcon.http.authentication.kerberos.name.rules|new_lines_escaped"
-    ],
-    "configurations": [{
-      "falcon-startup.properties": {
-        "*.dfs.namenode.kerberos.principal": "nn/_HOST@${realm}",
-        "*.falcon.http.authentication.type": "kerberos",
-        "*.falcon.authentication.type": "kerberos"
-      }
-    }],
-    "name": "FALCON"
-  }],
-  "properties": {
-    "additional_realms": "",
-    "keytab_dir": "/etc/security/keytabs",
-    "realm": "EXAMPLE.COM"
-  }
-}
\ No newline at end of file

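A note on the ${...} references in the descriptor deleted above: values such as "${keytab_dir}/storm.headless.keytab" pull from the descriptor's own properties block, references such as "${storm-env/storm_user}" read other config types, and a "|toLower()" suffix post-processes the looked-up value (_HOST is substituted per host and is not modeled here). Below is a minimal Python sketch of that expansion with invented lookup tables; it is an illustration only, not Ambari's actual resolver.

    import re

    # Toy lookup tables standing in for the descriptor "properties" block and
    # for cross-config references; both are assumptions for this sketch.
    descriptor_properties = {"keytab_dir": "/etc/security/keytabs", "realm": "EXAMPLE.COM"}
    config_lookup = {"storm-env/storm_user": "storm", "cluster-env/user_group": "hadoop"}

    def resolve(value, extras=None):
        """Expand ${name} and ${config-type/property} references in a descriptor value."""
        extras = extras or {}
        def repl(match):
            token = match.group(1)
            # A "name|function()" suffix such as toLower() is applied after lookup.
            name, _, func = token.partition("|")
            looked_up = (extras.get(name) or descriptor_properties.get(name)
                         or config_lookup.get(name, match.group(0)))
            if func.startswith("toLower"):
                looked_up = looked_up.lower()
            return looked_up
        return re.sub(r"\$\{([^}]+)\}", repl, value)

    print(resolve("${storm-env/storm_user}-${cluster_name|toLower()}@${realm}",
                  extras={"cluster_name": "MyCluster"}))
    # -> storm-mycluster@EXAMPLE.COM
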
http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json b/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json
index 0a8f20b..147c1c0 100644
--- a/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json
+++ b/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json
@@ -2893,7 +2893,7 @@
             {
               "yarn-site" : {
                 "yarn.timeline-service.http-authentication.signer.secret.provider.object" : "",
-                "yarn.resourcemanager.proxyuser.*.users" : "",
+                "yarn.resourcemanager.proxyusers.*.users" : "",
                 "yarn.timeline-service.http-authentication.token.validity" : "",
                 "yarn.timeline-service.http-authentication.kerberos.name.rules" : "",
                 "yarn.timeline-service.http-authentication.cookie.path" : "",
@@ -2901,14 +2901,14 @@
                 "yarn.resourcemanager.proxy-user-privileges.enabled" : "true",
                 "yarn.acl.enable" : "true",
                 "yarn.timeline-service.http-authentication.signer.secret.provider" : "",
-                "yarn.timeline-service.http-authentication.proxyuser.*.groups" : "",
-                "yarn.timeline-service.http-authentication.proxyuser.*.hosts" : "",
+                "yarn.timeline-service.http-authentication.proxyusers.*.groups" : "",
+                "yarn.timeline-service.http-authentication.proxyusers.*.hosts" : "",
                 "yarn.timeline-service.http-authentication.signature.secret" : "",
                 "yarn.timeline-service.http-authentication.signature.secret.file" : "",
-                "yarn.resourcemanager.proxyuser.*.hosts" : "",
-                "yarn.resourcemanager.proxyuser.*.groups" : "",
+                "yarn.resourcemanager.proxyusers.*.hosts" : "",
+                "yarn.resourcemanager.proxyusers.*.groups" : "",
                 "yarn.timeline-service.enabled" : "false",
-                "yarn.timeline-service.http-authentication.proxyuser.*.users" : "",
+                "yarn.timeline-service.http-authentication.proxyusers.*.users" : "",
                 "yarn.timeline-service.http-authentication.cookie.domain" : ""
               }
             }


[12/50] [abbrv] ambari git commit: AMBARI-19044 Install & configure Ranger plugin components independently of Ranger admin components (mugdha)

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index f108622..0014b7c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -507,14 +507,6 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
     if webhcat_queue is not None:
       putWebhcatSiteProperty("templeton.hadoop.queue.name", webhcat_queue)
 
-
-    # Recommend Ranger Hive authorization as per Ranger Hive plugin property
-    if "ranger-env" in services["configurations"] and "hive-env" in services["configurations"] and \
-        "ranger-hive-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
-      rangerEnvHivePluginProperty = services["configurations"]["ranger-env"]["properties"]["ranger-hive-plugin-enabled"]
-      if (rangerEnvHivePluginProperty.lower() == "yes"):
-        putHiveEnvProperty("hive_security_authorization", "RANGER")
-
     # Security
     if ("configurations" not in services) or ("hive-env" not in services["configurations"]) or \
               ("properties" not in services["configurations"]["hive-env"]) or \
@@ -1233,9 +1225,10 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
 
   def validateHDFSRangerPluginConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
     ranger_plugin_properties = getSiteProperties(configurations, "ranger-hdfs-plugin-properties")
     ranger_plugin_enabled = ranger_plugin_properties['ranger-hdfs-plugin-enabled'] if ranger_plugin_properties else 'No'
-    if (ranger_plugin_enabled.lower() == 'yes'):
+    if 'RANGER' in servicesList and (ranger_plugin_enabled.lower() == 'yes'):
       # ranger-hdfs-plugin must be enabled in ranger-env
       ranger_env = getServicesSiteProperties(services, 'ranger-env')
       if not ranger_env or not 'ranger-hdfs-plugin-enabled' in ranger_env or \
@@ -1465,6 +1458,7 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
     validationItems = []
     hive_env = properties
     hive_site = getSiteProperties(configurations, "hive-site")
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
     if "hive_security_authorization" in hive_env and \
         str(hive_env["hive_security_authorization"]).lower() == "none" \
       and str(hive_site["hive.security.authorization.enabled"]).lower() == "true":
@@ -1474,12 +1468,13 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
     if "hive_security_authorization" in hive_env and \
         str(hive_env["hive_security_authorization"]).lower() == "ranger":
       # ranger-hive-plugin must be enabled in ranger-env
-      ranger_env = getServicesSiteProperties(services, 'ranger-env')
-      if not ranger_env or not 'ranger-hive-plugin-enabled' in ranger_env or \
-          ranger_env['ranger-hive-plugin-enabled'].lower() != 'yes':
-        validationItems.append({"config-name": 'hive_security_authorization',
-                                "item": self.getWarnItem(
-                                  "ranger-env/ranger-hive-plugin-enabled must be enabled when hive_security_authorization is set to Ranger")})
+      if 'RANGER' in servicesList:
+        ranger_env = getServicesSiteProperties(services, 'ranger-env')
+        if not ranger_env or not 'ranger-hive-plugin-enabled' in ranger_env or \
+            ranger_env['ranger-hive-plugin-enabled'].lower() != 'yes':
+          validationItems.append({"config-name": 'hive_security_authorization',
+                                  "item": self.getWarnItem(
+                                    "ranger-env/ranger-hive-plugin-enabled must be enabled when hive_security_authorization is set to Ranger")})
     return self.toConfigurationValidationProblems(validationItems, "hive-env")
 
   def validateHiveConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
@@ -1633,9 +1628,10 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
 
   def validateHBASERangerPluginConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
     ranger_plugin_properties = getSiteProperties(configurations, "ranger-hbase-plugin-properties")
     ranger_plugin_enabled = ranger_plugin_properties['ranger-hbase-plugin-enabled'] if ranger_plugin_properties else 'No'
-    if ranger_plugin_enabled.lower() == 'yes':
+    if 'RANGER' in servicesList and ranger_plugin_enabled.lower() == 'yes':
       # ranger-hbase-plugin must be enabled in ranger-env
       ranger_env = getServicesSiteProperties(services, 'ranger-env')
       if not ranger_env or not 'ranger-hbase-plugin-enabled' in ranger_env or \
@@ -1647,9 +1643,10 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
 
   def validateKnoxRangerPluginConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
     ranger_plugin_properties = getSiteProperties(configurations, "ranger-knox-plugin-properties")
     ranger_plugin_enabled = ranger_plugin_properties['ranger-knox-plugin-enabled'] if ranger_plugin_properties else 'No'
-    if ranger_plugin_enabled.lower() == 'yes':
+    if 'RANGER' in servicesList and ranger_plugin_enabled.lower() == 'yes':
       # ranger-knox-plugin must be enabled in ranger-env
       ranger_env = getServicesSiteProperties(services, 'ranger-env')
       if not ranger_env or not 'ranger-knox-plugin-enabled' in ranger_env or \
@@ -1665,7 +1662,7 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
     ranger_plugin_enabled = ranger_plugin_properties['ranger-kafka-plugin-enabled'] if ranger_plugin_properties else 'No'
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
     security_enabled = self.isSecurityEnabled(services)
-    if ranger_plugin_enabled.lower() == 'yes':
+    if 'RANGER' in servicesList and ranger_plugin_enabled.lower() == 'yes':
       # ranger-kafka-plugin must be enabled in ranger-env
       ranger_env = getServicesSiteProperties(services, 'ranger-env')
       if not ranger_env or not 'ranger-kafka-plugin-enabled' in ranger_env or \
@@ -1686,7 +1683,7 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
     ranger_plugin_enabled = ranger_plugin_properties['ranger-storm-plugin-enabled'] if ranger_plugin_properties else 'No'
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
     security_enabled = self.isSecurityEnabled(services)
-    if ranger_plugin_enabled.lower() == 'yes':
+    if 'RANGER' in servicesList and ranger_plugin_enabled.lower() == 'yes':
       # ranger-storm-plugin must be enabled in ranger-env
       ranger_env = getServicesSiteProperties(services, 'ranger-env')
       if not ranger_env or not 'ranger-storm-plugin-enabled' in ranger_env or \
@@ -1719,9 +1716,10 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
 
   def validateYARNRangerPluginConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
     ranger_plugin_properties = getSiteProperties(configurations, "ranger-yarn-plugin-properties")
     ranger_plugin_enabled = ranger_plugin_properties['ranger-yarn-plugin-enabled'] if ranger_plugin_properties else 'No'
-    if ranger_plugin_enabled.lower() == 'yes':
+    if 'RANGER' in servicesList and ranger_plugin_enabled.lower() == 'yes':
       # ranger-yarn-plugin must be enabled in ranger-env
       ranger_env = getServicesSiteProperties(services, 'ranger-env')
       if not ranger_env or not 'ranger-yarn-plugin-enabled' in ranger_env or \

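The recurring change in this hunk is a single guard: each Ranger plugin validator now checks that the RANGER service is actually part of the stack before warning about ranger-env flags. Below is a self-contained sketch of that check, condensing the repeated inline code into one hypothetical helper; the payload shapes are trimmed-down assumptions covering only the fields the diff touches, not the full Ambari request objects.

    def is_ranger_plugin_active(services, configurations, plugin_site, plugin_flag):
        # Mirror of the diff's pattern: build the list of stack services, then
        # require both RANGER to be present and the plugin flag to read "yes".
        services_list = [s["StackServices"]["service_name"] for s in services["services"]]
        props = configurations.get(plugin_site, {}).get("properties", {})
        plugin_enabled = props.get(plugin_flag, "No")
        return "RANGER" in services_list and plugin_enabled.lower() == "yes"

    services = {"services": [{"StackServices": {"service_name": "HDFS"}},
                             {"StackServices": {"service_name": "RANGER"}}]}
    configurations = {"ranger-hdfs-plugin-properties":
                      {"properties": {"ranger-hdfs-plugin-enabled": "Yes"}}}
    print(is_ranger_plugin_active(services, configurations,
                                  "ranger-hdfs-plugin-properties",
                                  "ranger-hdfs-plugin-enabled"))  # True

Without the RANGER membership check, a stale ranger-*-plugin-enabled value could raise warnings on clusters that never installed Ranger admin components, which is exactly what this commit avoids.
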
http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.3/services/HBASE/configuration/ranger-hbase-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HBASE/configuration/ranger-hbase-audit.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HBASE/configuration/ranger-hbase-audit.xml
index 85c16c8..f670d7e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HBASE/configuration/ranger-hbase-audit.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HBASE/configuration/ranger-hbase-audit.xml
@@ -23,7 +23,7 @@
     <name>xasecure.audit.is.enabled</name>
     <value>true</value>
     <description>Is Audit enabled?</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db</name>
@@ -39,19 +39,19 @@
         <name>xasecure.audit.destination.db</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.jdbc.url</name>
     <value>{{audit_jdbc_url}}</value>
     <description>Audit DB JDBC URL</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.user</name>
     <value>{{xa_audit_db_user}}</value>
     <description>Audit DB JDBC User</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.password</name>
@@ -61,25 +61,25 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.jdbc.driver</name>
     <value>{{jdbc_driver}}</value>
     <description>Audit DB JDBC Driver</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.credential.provider.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>Credential file store</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.batch.filespool.dir</name>
     <value>/var/log/hbase/audit/db/spool</value>
     <description>/var/log/hbase/audit/db/spool</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs</name>
@@ -95,7 +95,7 @@
         <name>xasecure.audit.destination.hdfs</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs.dir</name>
@@ -107,13 +107,13 @@
         <name>xasecure.audit.destination.hdfs.dir</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
     <value>/var/log/hbase/audit/hdfs/spool</value>
     <description>/var/log/hbase/audit/hdfs/spool</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr</name>
@@ -129,7 +129,7 @@
         <name>xasecure.audit.destination.solr</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr.urls</name>
@@ -144,7 +144,7 @@
         <name>ranger.audit.solr.urls</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr.zookeepers</name>
@@ -156,13 +156,13 @@
         <name>ranger.audit.solr.zookeepers</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
     <value>/var/log/hbase/audit/solr/spool</value>
     <description>/var/log/hbase/audit/solr/spool</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.provider.summary.enabled</name>
@@ -172,6 +172,6 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

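The only change in this file (and in the sibling ranger-*-audit, ranger-*-policymgr-ssl, and ranger-*-security files below) is flipping the <on-ambari-upgrade> marker, which, as the attribute name suggests, controls whether Ambari adds the property to existing configurations during an Ambari upgrade. A toy reader for the marker, assuming nothing beyond the XML shape shown above:

    import xml.etree.ElementTree as ET

    # Illustrative snippet modeled on the file above; it lists which properties
    # a stack definition flags for addition on upgrade.
    SNIPPET = """
    <configuration>
      <property>
        <name>xasecure.audit.is.enabled</name>
        <value>true</value>
        <on-ambari-upgrade add="true"/>
      </property>
      <property>
        <name>xasecure.audit.destination.db</name>
        <value>false</value>
        <on-ambari-upgrade add="false"/>
      </property>
    </configuration>
    """

    root = ET.fromstring(SNIPPET)
    added_on_upgrade = [p.findtext("name")
                        for p in root.findall("property")
                        if p.find("on-ambari-upgrade") is not None
                        and p.find("on-ambari-upgrade").get("add") == "true"]
    print(added_on_upgrade)  # ['xasecure.audit.is.enabled']
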
http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.3/services/HBASE/configuration/ranger-hbase-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HBASE/configuration/ranger-hbase-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HBASE/configuration/ranger-hbase-policymgr-ssl.xml
index c761b26..79370bc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HBASE/configuration/ranger-hbase-policymgr-ssl.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HBASE/configuration/ranger-hbase-policymgr-ssl.xml
@@ -23,7 +23,7 @@
     <name>xasecure.policymgr.clientssl.keystore</name>
     <value>/usr/hdp/current/hbase-client/conf/ranger-plugin-keystore.jks</value>
     <description>Java Keystore files</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.keystore.password</name>
@@ -33,13 +33,13 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore</name>
     <value>/usr/hdp/current/hbase-client/conf/ranger-plugin-truststore.jks</value>
     <description>java truststore file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore.password</name>
@@ -49,18 +49,18 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>java keystore credential file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>java truststore credential file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.3/services/HBASE/configuration/ranger-hbase-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HBASE/configuration/ranger-hbase-security.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HBASE/configuration/ranger-hbase-security.xml
index 1deb9e5..0ad5e60 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HBASE/configuration/ranger-hbase-security.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HBASE/configuration/ranger-hbase-security.xml
@@ -23,37 +23,43 @@
     <name>ranger.plugin.hbase.service.name</name>
     <value>{{repo_name}}</value>
     <description>Name of the Ranger service containing HBase policies</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.hbase.policy.source.impl</name>
     <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
     <description>Class to retrieve policies from the source</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.hbase.policy.rest.url</name>
     <value>{{policymgr_mgr_url}}</value>
     <description>URL to Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>policymgr_external_url</name>
+      </property>
+    </depends-on>
   </property>
   <property>
     <name>ranger.plugin.hbase.policy.rest.ssl.config.file</name>
     <value>/etc/hbase/conf/ranger-policymgr-ssl.xml</value>
     <description>Path to the file containing SSL details to contact Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.hbase.policy.pollIntervalMs</name>
     <value>30000</value>
     <description>How often to poll for changes in policies?</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.hbase.policy.cache.dir</name>
     <value>/etc/ranger/{{repo_name}}/policycache</value>
     <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.hbase.update.xapolicies.on.grant.revoke</name>
@@ -63,6 +69,6 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

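Beyond the upgrade-marker flip, this file gains a <depends-on> block tying ranger.plugin.hbase.policy.rest.url to admin-properties/policymgr_external_url, so a change to the Ranger Admin external URL can re-trigger a recommendation for the plugin URL. The sketch below shows what such a recommendation step could look like; the helper name and payload shapes are assumptions for illustration, not the actual HDP stack_advisor code.

    def recommend_ranger_urls(configurations, services):
        # Read the Ranger Admin external URL and push it into each plugin's
        # policy.rest.url, as the new <depends-on> blocks imply.
        admin_props = (services.get("configurations", {})
                               .get("admin-properties", {})
                               .get("properties", {}))
        external_url = admin_props.get("policymgr_external_url")
        if external_url:
            for site in ("ranger-hbase-security", "ranger-hdfs-security",
                         "ranger-hive-security", "ranger-knox-security"):
                target = configurations.setdefault(site, {}).setdefault("properties", {})
                plugin = site.split("-")[1]  # e.g. "hbase"
                target["ranger.plugin.%s.policy.rest.url" % plugin] = external_url

    configs = {}
    svcs = {"configurations": {"admin-properties":
            {"properties": {"policymgr_external_url": "http://ranger-admin:6080"}}}}
    recommend_ranger_urls(configs, svcs)
    print(configs["ranger-hive-security"]["properties"]["ranger.plugin.hive.policy.rest.url"])
    # -> http://ranger-admin:6080
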
http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml
index 71ba3a6..e14a9e8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml
@@ -23,7 +23,7 @@
     <name>xasecure.policymgr.clientssl.keystore</name>
     <value>/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks</value>
     <description>Java Keystore files</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.keystore.password</name>
@@ -33,13 +33,13 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore</name>
     <value>/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks</value>
     <description>java truststore file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore.password</name>
@@ -49,18 +49,18 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>java keystore credential file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>java truststore credential file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/ranger-hdfs-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/ranger-hdfs-security.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/ranger-hdfs-security.xml
index cfd8a4f..b2b8edb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/ranger-hdfs-security.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/ranger-hdfs-security.xml
@@ -23,42 +23,48 @@
     <name>ranger.plugin.hdfs.service.name</name>
     <value>{{repo_name}}</value>
     <description>Name of the Ranger service containing Hdfs policies</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.hdfs.policy.source.impl</name>
     <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
     <description>Class to retrieve policies from the source</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.hdfs.policy.rest.url</name>
     <value>{{policymgr_mgr_url}}</value>
     <description>URL to Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>policymgr_external_url</name>
+      </property>
+    </depends-on>
   </property>
   <property>
     <name>ranger.plugin.hdfs.policy.rest.ssl.config.file</name>
     <value>/etc/hadoop/conf/ranger-policymgr-ssl.xml</value>
     <description>Path to the file containing SSL details to contact Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.hdfs.policy.pollIntervalMs</name>
     <value>30000</value>
     <description>How often to poll for changes in policies?</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.hdfs.policy.cache.dir</name>
     <value>/etc/ranger/{{repo_name}}/policycache</value>
     <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.add-hadoop-authorization</name>
     <value>true</value>
     <description>Enable/Disable the default hadoop authorization (based on rwxrwxrwx permission on the resource) if Ranger Authorization fails.</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-audit.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-audit.xml
index b210fca..874d0d5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-audit.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-audit.xml
@@ -23,7 +23,7 @@
     <name>xasecure.audit.is.enabled</name>
     <value>true</value>
     <description>Is Audit enabled?</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db</name>
@@ -39,19 +39,19 @@
         <name>xasecure.audit.destination.db</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.jdbc.url</name>
     <value>{{audit_jdbc_url}}</value>
     <description>Audit DB JDBC URL</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.user</name>
     <value>{{xa_audit_db_user}}</value>
     <description>Audit DB JDBC User</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.password</name>
@@ -61,25 +61,25 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.jdbc.driver</name>
     <value>{{jdbc_driver}}</value>
     <description>Audit DB JDBC Driver</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.credential.provider.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>Credential file store</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.batch.filespool.dir</name>
     <value>/var/log/hive/audit/db/spool</value>
     <description>/var/log/hive/audit/db/spool</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs</name>
@@ -95,7 +95,7 @@
         <name>xasecure.audit.destination.hdfs</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs.dir</name>
@@ -107,13 +107,13 @@
         <name>xasecure.audit.destination.hdfs.dir</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
     <value>/var/log/hive/audit/hdfs/spool</value>
     <description>/var/log/hive/audit/hdfs/spool</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr</name>
@@ -129,7 +129,7 @@
         <name>xasecure.audit.destination.solr</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr.urls</name>
@@ -144,7 +144,7 @@
         <name>ranger.audit.solr.urls</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr.zookeepers</name>
@@ -156,13 +156,13 @@
         <name>ranger.audit.solr.zookeepers</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
     <value>/var/log/hive/audit/solr/spool</value>
     <description>/var/log/hive/audit/solr/spool</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.provider.summary.enabled</name>
@@ -172,6 +172,6 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-policymgr-ssl.xml
index a538843..14e7b16 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-policymgr-ssl.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-policymgr-ssl.xml
@@ -23,7 +23,7 @@
     <name>xasecure.policymgr.clientssl.keystore</name>
     <value>/usr/hdp/current/hive-server2/conf/ranger-plugin-keystore.jks</value>
     <description>Java Keystore files</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.keystore.password</name>
@@ -33,13 +33,13 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore</name>
     <value>/usr/hdp/current/hive-server2/conf/ranger-plugin-truststore.jks</value>
     <description>java truststore file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore.password</name>
@@ -49,18 +49,18 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>java keystore credential file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>java truststore credential file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-security.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-security.xml
index ce4074a..a07972a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-security.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HIVE/configuration/ranger-hive-security.xml
@@ -23,37 +23,43 @@
     <name>ranger.plugin.hive.service.name</name>
     <value>{{repo_name}}</value>
     <description>Name of the Ranger service containing policies for this HIVE instance</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.hive.policy.source.impl</name>
     <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
     <description>Class to retrieve policies from the source</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.hive.policy.rest.url</name>
     <value>{{policymgr_mgr_url}}</value>
     <description>URL to Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>policymgr_external_url</name>
+      </property>
+    </depends-on>
   </property>
   <property>
     <name>ranger.plugin.hive.policy.rest.ssl.config.file</name>
     <value>/usr/hdp/current/hive-server2/conf/conf.server/ranger-policymgr-ssl.xml</value>
     <description>Path to the file containing SSL details to contact Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.hive.policy.pollIntervalMs</name>
     <value>30000</value>
     <description>How often to poll for changes in policies?</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.hive.policy.cache.dir</name>
     <value>/etc/ranger/{{repo_name}}/policycache</value>
     <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.hive.update.xapolicies.on.grant.revoke</name>
@@ -63,6 +69,6 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.3/services/KAFKA/configuration/ranger-kafka-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/KAFKA/configuration/ranger-kafka-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/KAFKA/configuration/ranger-kafka-policymgr-ssl.xml
index 24fd407..2f4c121 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/KAFKA/configuration/ranger-kafka-policymgr-ssl.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/KAFKA/configuration/ranger-kafka-policymgr-ssl.xml
@@ -23,12 +23,12 @@
     <name>xasecure.policymgr.clientssl.keystore</name>
     <value>/usr/hdp/current/kafka-broker/config/ranger-plugin-keystore.jks</value>
     <description>Java Keystore files</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore</name>
     <value>/usr/hdp/current/kafka-broker/config/ranger-plugin-truststore.jks</value>
     <description>java truststore file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.3/services/KNOX/configuration/ranger-knox-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/KNOX/configuration/ranger-knox-audit.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/KNOX/configuration/ranger-knox-audit.xml
index 1f3c1d1..abdf2bd 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/KNOX/configuration/ranger-knox-audit.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/KNOX/configuration/ranger-knox-audit.xml
@@ -23,7 +23,7 @@
     <name>xasecure.audit.is.enabled</name>
     <value>true</value>
     <description>Is Audit enabled?</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db</name>
@@ -39,19 +39,19 @@
         <name>xasecure.audit.destination.db</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.jdbc.url</name>
     <value>{{audit_jdbc_url}}</value>
     <description>Audit DB JDBC URL</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.user</name>
     <value>{{xa_audit_db_user}}</value>
     <description>Audit DB JDBC User</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.password</name>
@@ -61,25 +61,25 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.jdbc.driver</name>
     <value>{{jdbc_driver}}</value>
     <description>Audit DB JDBC Driver</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.credential.provider.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>Credential file store</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.batch.filespool.dir</name>
     <value>/var/log/knox/audit/db/spool</value>
     <description>/var/log/knox/audit/db/spool</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs</name>
@@ -95,7 +95,7 @@
         <name>xasecure.audit.destination.hdfs</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs.dir</name>
@@ -107,13 +107,13 @@
         <name>xasecure.audit.destination.hdfs.dir</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
     <value>/var/log/knox/audit/hdfs/spool</value>
     <description>/var/log/knox/audit/hdfs/spool</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr</name>
@@ -129,7 +129,7 @@
         <name>xasecure.audit.destination.solr</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr.urls</name>
@@ -144,7 +144,7 @@
         <name>ranger.audit.solr.urls</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr.zookeepers</name>
@@ -156,13 +156,13 @@
         <name>ranger.audit.solr.zookeepers</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
     <value>/var/log/knox/audit/solr/spool</value>
     <description>/var/log/knox/audit/solr/spool</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.provider.summary.enabled</name>
@@ -172,6 +172,6 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.3/services/KNOX/configuration/ranger-knox-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/KNOX/configuration/ranger-knox-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/KNOX/configuration/ranger-knox-policymgr-ssl.xml
index bb0878f..6cc2351 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/KNOX/configuration/ranger-knox-policymgr-ssl.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/KNOX/configuration/ranger-knox-policymgr-ssl.xml
@@ -23,7 +23,7 @@
     <name>xasecure.policymgr.clientssl.keystore</name>
     <value>/usr/hdp/current/knox-server/conf/ranger-plugin-keystore.jks</value>
     <description>Java Keystore files</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.keystore.password</name>
@@ -33,13 +33,13 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore</name>
     <value>/usr/hdp/current/knox-server/conf/ranger-plugin-truststore.jks</value>
     <description>java truststore file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore.password</name>
@@ -49,18 +49,18 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>java keystore credential file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>java truststore credential file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.3/services/KNOX/configuration/ranger-knox-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/KNOX/configuration/ranger-knox-security.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/KNOX/configuration/ranger-knox-security.xml
index 9bd1079..0f0d3a7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/KNOX/configuration/ranger-knox-security.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/KNOX/configuration/ranger-knox-security.xml
@@ -23,36 +23,42 @@
     <name>ranger.plugin.knox.service.name</name>
     <value>{{repo_name}}</value>
     <description>Name of the Ranger service containing policies for this Knox instance</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.knox.policy.source.impl</name>
     <value>org.apache.ranger.admin.client.RangerAdminJersey2RESTClient</value>
     <description>Class to retrieve policies from the source</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.knox.policy.rest.url</name>
     <value>{{policymgr_mgr_url}}</value>
     <description>URL to Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>policymgr_external_url</name>
+      </property>
+    </depends-on>
   </property>
   <property>
     <name>ranger.plugin.knox.policy.rest.ssl.config.file</name>
     <value>/usr/hdp/current/knox-server/conf/ranger-policymgr-ssl.xml</value>
     <description>Path to the file containing SSL details to contact Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.knox.policy.pollIntervalMs</name>
     <value>30000</value>
     <description>How often to poll for changes in policies?</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.knox.policy.cache.dir</name>
     <value>/etc/ranger/{{repo_name}}/policycache</value>
     <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/ranger-storm-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/ranger-storm-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/ranger-storm-policymgr-ssl.xml
index 5672f04..21658e7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/ranger-storm-policymgr-ssl.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/ranger-storm-policymgr-ssl.xml
@@ -23,12 +23,12 @@
     <name>xasecure.policymgr.clientssl.keystore</name>
     <value>/usr/hdp/current/storm-client/conf/ranger-plugin-keystore.jks</value>
     <description>Java Keystore files</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore</name>
     <value>/usr/hdp/current/storm-client/conf/ranger-plugin-truststore.jks</value>
     <description>java truststore file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/ranger-storm-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/ranger-storm-security.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/ranger-storm-security.xml
index f3d7530..8a3dd2e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/ranger-storm-security.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/STORM/configuration/ranger-storm-security.xml
@@ -23,6 +23,6 @@
     <name>ranger.plugin.storm.policy.rest.ssl.config.file</name>
     <value>/usr/hdp/current/storm-client/conf/ranger-policymgr-ssl.xml</value>
     <description>Path to the file containing SSL details to contact Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>
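
The recurring change across these Ranger plugin configs is the flip of <on-ambari-upgrade add="false"/> to add="true". As a rough sketch of the assumed semantics (illustrative Python, not Ambari's real merge code): a stack-defined property is introduced into an existing cluster configuration during an Ambari server upgrade only when its add flag is true.

# Sketch under assumed semantics of <on-ambari-upgrade add="..."/>; the
# function and argument names here are hypothetical, not Ambari's actual API.
def merge_on_ambari_upgrade(cluster_config, stack_defaults, add_flags):
    """Merge stack defaults into an existing config during an Ambari upgrade."""
    for name, value in stack_defaults.items():
        # A property absent from the cluster config is added only if add="true".
        if name not in cluster_config and add_flags.get(name, False):
            cluster_config[name] = value
    return cluster_config

# With add="true", the audit property above would now appear after upgrade:
print(merge_on_ambari_upgrade(
    {}, {"xasecure.audit.is.enabled": "true"}, {"xasecure.audit.is.enabled": True}))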

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/ranger-yarn-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/ranger-yarn-audit.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/ranger-yarn-audit.xml
index a6b1baa..8237f1c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/ranger-yarn-audit.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/ranger-yarn-audit.xml
@@ -23,7 +23,7 @@
     <name>xasecure.audit.is.enabled</name>
     <value>true</value>
     <description>Is Audit enabled?</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db</name>
@@ -39,19 +39,19 @@
         <name>xasecure.audit.destination.db</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.jdbc.url</name>
     <value>{{audit_jdbc_url}}</value>
     <description>Audit DB JDBC URL</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.user</name>
     <value>{{xa_audit_db_user}}</value>
     <description>Audit DB JDBC User</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.password</name>
@@ -61,25 +61,25 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.jdbc.driver</name>
     <value>{{jdbc_driver}}</value>
     <description>Audit DB JDBC Driver</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.credential.provider.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>Credential file store</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.batch.filespool.dir</name>
     <value>/var/log/hadoop/yarn/audit/db/spool</value>
     <description>/var/log/hadoop/yarn/audit/db/spool</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs</name>
@@ -95,7 +95,7 @@
         <name>xasecure.audit.destination.hdfs</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs.dir</name>
@@ -107,13 +107,13 @@
         <name>xasecure.audit.destination.hdfs.dir</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
     <value>/var/log/hadoop/yarn/audit/hdfs/spool</value>
     <description>/var/log/hadoop/yarn/audit/hdfs/spool</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr</name>
@@ -129,7 +129,7 @@
         <name>xasecure.audit.destination.solr</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr.urls</name>
@@ -144,7 +144,7 @@
         <name>ranger.audit.solr.urls</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr.zookeepers</name>
@@ -156,13 +156,13 @@
         <name>ranger.audit.solr.zookeepers</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
     <value>/var/log/hadoop/yarn/audit/solr/spool</value>
     <description>/var/log/hadoop/yarn/audit/solr/spool</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.provider.summary.enabled</name>
@@ -172,6 +172,6 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/ranger-yarn-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/ranger-yarn-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/ranger-yarn-plugin-properties.xml
index 97867cc..1899d44 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/ranger-yarn-plugin-properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/ranger-yarn-plugin-properties.xml
@@ -24,7 +24,7 @@
     <value>ambari-qa</value>
     <display-name>Policy user for YARN</display-name>
    <description>This user must be a system user and must also be present in the Ranger admin portal</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.rpc.protection</name>
@@ -33,7 +33,7 @@
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>common.name.for.certificate</name>
@@ -42,7 +42,7 @@
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger-yarn-plugin-enabled</name>
@@ -59,14 +59,14 @@
       <type>boolean</type>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>REPOSITORY_CONFIG_USERNAME</name>
     <value>yarn</value>
     <display-name>Ranger repository config user</display-name>
     <description>Used for repository creation on ranger admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>REPOSITORY_CONFIG_PASSWORD</name>
@@ -77,6 +77,6 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
index 5410104..6ad6e62 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/ranger-yarn-policymgr-ssl.xml
@@ -23,7 +23,7 @@
     <name>xasecure.policymgr.clientssl.keystore</name>
     <value>/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-keystore.jks</value>
     <description>Java Keystore files</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.keystore.password</name>
@@ -33,13 +33,13 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore</name>
     <value>/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-truststore.jks</value>
     <description>java truststore file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore.password</name>
@@ -49,18 +49,18 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>java keystore credential file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>java truststore credential file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/ranger-yarn-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/ranger-yarn-security.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/ranger-yarn-security.xml
index 5f69962..3c0b29f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/ranger-yarn-security.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/configuration/ranger-yarn-security.xml
@@ -23,36 +23,42 @@
     <name>ranger.plugin.yarn.service.name</name>
     <value>{{repo_name}}</value>
     <description>Name of the Ranger service containing policies for this Yarn instance</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.yarn.policy.source.impl</name>
     <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
     <description>Class to retrieve policies from the source</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.yarn.policy.rest.url</name>
     <value>{{policymgr_mgr_url}}</value>
     <description>URL to Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>policymgr_external_url</name>
+      </property>
+    </depends-on>
   </property>
   <property>
     <name>ranger.plugin.yarn.policy.rest.ssl.config.file</name>
     <value>/etc/hadoop/conf/ranger-policymgr-ssl-yarn.xml</value>
     <description>Path to the file containing SSL details to contact Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.yarn.policy.pollIntervalMs</name>
     <value>30000</value>
     <description>How often to poll for changes in policies?</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.yarn.policy.cache.dir</name>
     <value>/etc/ranger/{{repo_name}}/policycache</value>
     <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>
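
The other recurring change: each plugin's ranger.plugin.<service>.policy.rest.url now carries a depends-on on admin-properties/policymgr_external_url, so an edit to the Ranger Admin URL re-drives a recommendation for every plugin URL. A toy model of that trigger (assumed structures, not Ambari's actual recommendation engine):

# Hypothetical dependency map: (config_type, property) -> dependent properties.
DEPENDS_ON = {
    ("admin-properties", "policymgr_external_url"): [
        ("ranger-knox-security", "ranger.plugin.knox.policy.rest.url"),
        ("ranger-yarn-security", "ranger.plugin.yarn.policy.rest.url"),
    ],
}

def on_property_changed(config_type, name, new_value, configs):
    """Re-recommend every property that depends on the property that changed."""
    for dep_type, dep_name in DEPENDS_ON.get((config_type, name), []):
        configs.setdefault(dep_type, {})[dep_name] = new_value
    return configs

print(on_property_changed("admin-properties", "policymgr_external_url",
                          "http://ranger-host:6080", {}))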

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
index eb7ebc1..1fc8598 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
@@ -777,6 +777,40 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
         knox_port = services['configurations']["gateway-site"]["properties"]['gateway.port']
       putRangerAdminProperty('ranger.sso.providerurl', 'https://{0}:{1}/gateway/knoxsso/api/v1/websso'.format(knox_host, knox_port))
 
+    required_services = [
+      {'service_name': 'HDFS', 'config_type': 'ranger-hdfs-security'},
+      {'service_name': 'YARN', 'config_type': 'ranger-yarn-security'},
+      {'service_name': 'HBASE', 'config_type': 'ranger-hbase-security'},
+      {'service_name': 'HIVE', 'config_type': 'ranger-hive-security'},
+      {'service_name': 'KNOX', 'config_type': 'ranger-knox-security'},
+      {'service_name': 'KAFKA', 'config_type': 'ranger-kafka-security'},
+      {'service_name': 'RANGER_KMS','config_type': 'ranger-kms-security'},
+      {'service_name': 'STORM', 'config_type': 'ranger-storm-security'}
+    ]
+
+    # recommendation for ranger url for ranger-supported plugins
+    self.recommendRangerUrlConfigurations(configurations, services, required_services)
+
+  def recommendRangerUrlConfigurations(self, configurations, services, requiredServices):
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+    policymgr_external_url = ""
+    if 'admin-properties' in services['configurations'] and 'policymgr_external_url' in services['configurations']['admin-properties']['properties']:
+      if 'admin-properties' in configurations and 'policymgr_external_url' in configurations['admin-properties']['properties']:
+        policymgr_external_url = configurations['admin-properties']['properties']['policymgr_external_url']
+      else:
+        policymgr_external_url = services['configurations']['admin-properties']['properties']['policymgr_external_url']
+
+    for index in range(len(requiredServices)):
+      if requiredServices[index]['service_name'] in servicesList:
+        component_config_type = requiredServices[index]['config_type']
+        component_name = requiredServices[index]['service_name']
+        component_config_property = 'ranger.plugin.{0}.policy.rest.url'.format(component_name.lower())
+        if requiredServices[index]['service_name'] == 'RANGER_KMS':
+          component_config_property = 'ranger.plugin.kms.policy.rest.url'
+        putRangerSecurityProperty = self.putProperty(configurations, component_config_type, services)
+        if component_config_type in services["configurations"] and component_config_property in services["configurations"][component_config_type]["properties"]:
+          putRangerSecurityProperty(component_config_property, policymgr_external_url)
 
   def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP23StackAdvisor, self).recommendYARNConfigurations(configurations, clusterData, services, hosts)
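
A condensed, standalone rendering of the recommendRangerUrlConfigurations flow above (dict shapes simplified and the putProperty plumbing replaced by a direct write; illustrative only):

def recommend_ranger_urls(configurations, services, required_services):
    deployed = [s["StackServices"]["service_name"] for s in services["services"]]

    url = ""
    admin = services["configurations"].get("admin-properties", {}).get("properties", {})
    if "policymgr_external_url" in admin:
        # Prefer a value recommended earlier in this round, else the live one.
        staged = configurations.get("admin-properties", {}).get("properties", {})
        url = staged.get("policymgr_external_url", admin["policymgr_external_url"])

    for req in required_services:
        name, config_type = req["service_name"], req["config_type"]
        if name not in deployed:
            continue
        plugin = "kms" if name == "RANGER_KMS" else name.lower()
        prop = "ranger.plugin.{0}.policy.rest.url".format(plugin)
        existing = services["configurations"].get(config_type, {}).get("properties", {})
        if prop in existing:
            configurations.setdefault(config_type, {}).setdefault("properties", {})[prop] = url
    return configurations

services = {
    "services": [{"StackServices": {"service_name": "YARN"}}],
    "configurations": {
        "admin-properties": {"properties": {"policymgr_external_url": "http://ranger:6080"}},
        "ranger-yarn-security": {
            "properties": {"ranger.plugin.yarn.policy.rest.url": "{{policymgr_mgr_url}}"}},
    },
}
print(recommend_ranger_urls({}, services,
                            [{"service_name": "YARN", "config_type": "ranger-yarn-security"}]))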

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/ranger-atlas-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/ranger-atlas-audit.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/ranger-atlas-audit.xml
index 36677a1..efeea5f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/ranger-atlas-audit.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/ranger-atlas-audit.xml
@@ -78,7 +78,7 @@
         <name>xasecure.audit.destination.solr</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -94,7 +94,7 @@
         <name>ranger.audit.solr.urls</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -107,7 +107,7 @@
         <name>ranger.audit.solr.zookeepers</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/ranger-atlas-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/ranger-atlas-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/ranger-atlas-plugin-properties.xml
index fd623cb..977127c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/ranger-atlas-plugin-properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/ranger-atlas-plugin-properties.xml
@@ -26,7 +26,7 @@
     <display-name>Policy user for Atlas</display-name>
    <description>This user must be a system user and must also be present in the Ranger
      admin portal</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -36,7 +36,7 @@
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -54,7 +54,7 @@
       <type>boolean</type>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -63,7 +63,7 @@
     <display-name>Ranger repository config user</display-name>
     <description>Used for repository creation on ranger admin
     </description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -76,7 +76,55 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_admin_username</name>
+    <value></value>
+    <display-name>External Ranger admin username</display-name>
+    <description>Add the Ranger default admin username if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_admin_password</name>
+    <value></value>
+    <display-name>External Ranger admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Add the Ranger default admin password if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_username</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin username</display-name>
+    <description>Add the Ranger default Ambari admin username if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_password</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Add the Ranger default Ambari admin password if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 </configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/ranger-atlas-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/ranger-atlas-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/ranger-atlas-policymgr-ssl.xml
index dcffb63..349c829 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/ranger-atlas-policymgr-ssl.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/ranger-atlas-policymgr-ssl.xml
@@ -24,7 +24,7 @@
     <name>xasecure.policymgr.clientssl.keystore</name>
     <value>/usr/hdp/current/atlas-server/conf/ranger-plugin-keystore.jks</value>
     <description>Java Keystore files</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -35,14 +35,14 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>xasecure.policymgr.clientssl.truststore</name>
     <value>/usr/hdp/current/atlas-server/conf/ranger-plugin-truststore.jks</value>
     <description>java truststore file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
@@ -53,21 +53,21 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
     <property>
     <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>java keystore credential file</description>
-      <on-ambari-upgrade add="false"/>
+      <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>java truststore credential file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 </configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/ranger-atlas-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/ranger-atlas-security.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/ranger-atlas-security.xml
index ea0a026..c5588d1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/ranger-atlas-security.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/ATLAS/configuration/ranger-atlas-security.xml
@@ -23,49 +23,55 @@
     <name>ranger.plugin.atlas.service.name</name>
     <value>{{repo_name}}</value>
     <description>Name of the Ranger service containing Atlas policies</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>ranger.plugin.atlas.policy.source.impl</name>
     <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
     <description>Class to retrieve policies from the source</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>ranger.plugin.atlas.policy.rest.url</name>
     <value>{{policymgr_mgr_url}}</value>
     <description>URL to Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>policymgr_external_url</name>
+      </property>
+    </depends-on>
   </property>
 
   <property>
     <name>ranger.plugin.atlas.policy.rest.ssl.config.file</name>
     <value>/usr/hdp/current/atlas-server/conf/ranger-policymgr-ssl.xml</value>
     <description>Path to the file containing SSL details to contact Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>ranger.plugin.atlas.policy.pollIntervalMs</name>
     <value>30000</value>
     <description>How often to poll for changes in policies?</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>ranger.plugin.atlas.policy.cache.dir</name>
     <value>/etc/ranger/{{repo_name}}/policycache</value>
     <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
   <property>
     <name>xasecure.add-hadoop-authorization</name>
     <value>true</value>
     <description>Enable/Disable the default hadoop authorization (based on rwxrwxrwx permission on the resource) if Ranger Authorization fails.</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 
 </configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.5/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/HBASE/configuration/ranger-hbase-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
new file mode 100644
index 0000000..3450970
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
@@ -0,0 +1,71 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+
+  <property>
+    <name>external_admin_username</name>
+    <value></value>
+    <display-name>External Ranger admin username</display-name>
+    <description>Add the Ranger default admin username if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_admin_password</name>
+    <value></value>
+    <display-name>External Ranger admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Add the Ranger default admin password if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_username</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin username</display-name>
+    <description>Add the Ranger default Ambari admin username if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_password</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Add the Ranger default Ambari admin password if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.5/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
index 77f7be2..953e42e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
@@ -26,6 +26,54 @@
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
-    <on-ambari-upgrade add="false" />
+    <on-ambari-upgrade add="true" />
+  </property>
+
+  <property>
+    <name>external_admin_username</name>
+    <value></value>
+    <display-name>External Ranger admin username</display-name>
+    <description>Add the Ranger default admin username if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_admin_password</name>
+    <value></value>
+    <display-name>External Ranger admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Add the Ranger default admin password if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_username</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin username</display-name>
+    <description>Add the Ranger default Ambari admin username if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_password</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Add the Ranger default Ambari admin password if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/ranger-hive-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/ranger-hive-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/ranger-hive-plugin-properties.xml
new file mode 100644
index 0000000..3450970
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/ranger-hive-plugin-properties.xml
@@ -0,0 +1,71 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+
+  <property>
+    <name>external_admin_username</name>
+    <value></value>
+    <display-name>External Ranger admin username</display-name>
+    <description>Add the Ranger default admin username if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_admin_password</name>
+    <value></value>
+    <display-name>External Ranger admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Add the Ranger default admin password if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_username</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin username</display-name>
+    <description>Add the Ranger default Ambari admin username if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_password</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Add the Ranger default Ambari admin password if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/ranger-hive-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/ranger-hive-security.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/ranger-hive-security.xml
index 5bc8c9c..7f0bb99 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/ranger-hive-security.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/ranger-hive-security.xml
@@ -23,6 +23,6 @@
     <name>ranger.plugin.hive.policy.rest.ssl.config.file</name>
     <value>{{stack_root}}/current/{{ranger_hive_component}}/conf/conf.server/ranger-policymgr-ssl.xml</value>
     <description>Path to the file containing SSL details to contact Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.5/services/KAFKA/configuration/ranger-kafka-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/KAFKA/configuration/ranger-kafka-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/KAFKA/configuration/ranger-kafka-plugin-properties.xml
new file mode 100644
index 0000000..3450970
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/KAFKA/configuration/ranger-kafka-plugin-properties.xml
@@ -0,0 +1,71 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+
+  <property>
+    <name>external_admin_username</name>
+    <value></value>
+    <display-name>External Ranger admin username</display-name>
+    <description>Add the Ranger default admin username if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_admin_password</name>
+    <value></value>
+    <display-name>External Ranger admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Add the Ranger default admin password if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_username</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin username</display-name>
+    <description>Add the Ranger default Ambari admin username if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_password</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Add the Ranger default Ambari admin password if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+</configuration>
\ No newline at end of file


[34/50] [abbrv] ambari git commit: AMBARI-19596. ZKFC version shows incorrect in DB tables upon downgrade (ncole)

Posted by nc...@apache.org.
AMBARI-19596. ZKFC version shows incorrect in DB tables upon downgrade (ncole)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b2ba7ddc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b2ba7ddc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b2ba7ddc

Branch: refs/heads/branch-dev-patch-upgrade
Commit: b2ba7ddc8fe7380c49a39285f68918492cacf6f1
Parents: 4d44269
Author: Nate Cole <nc...@hortonworks.com>
Authored: Tue Jan 17 14:15:08 2017 -0500
Committer: Nate Cole <nc...@hortonworks.com>
Committed: Tue Jan 17 16:53:39 2017 -0500

----------------------------------------------------------------------
 .../internal/UpgradeResourceProvider.java       | 11 +++-
 .../listeners/upgrade/StackVersionListener.java |  2 +-
 .../ambari/server/state/UpgradeHelper.java      | 44 +++++++++++--
 .../internal/UpgradeResourceProviderTest.java   | 69 +++++++++++++++++++-
 .../HDP/2.1.1/services/STORM/metainfo.xml       |  1 +
 .../HDP/2.2.0/services/STORM/metainfo.xml       | 37 +++++++++++
 6 files changed, 153 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b2ba7ddc/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 6f8ebb7..2ec43cf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -37,6 +37,8 @@ import java.util.Set;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.ambari.annotations.Experimental;
+import org.apache.ambari.annotations.ExperimentalFeature;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
@@ -859,6 +861,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     // TODO: for now, all service components are transitioned to upgrading state
     // TODO: When performing patch upgrade, we should only target supported services/components
     // from upgrade pack
+    @Experimental(feature=ExperimentalFeature.PATCH_UPGRADES)
     Set<Service> services = new HashSet<>(cluster.getServices().values());
     Map<Service, Set<ServiceComponent>> targetComponents = new HashMap<>();
     for (Service service: services) {
@@ -866,9 +869,11 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
         new HashSet<>(service.getServiceComponents().values());
       targetComponents.put(service, serviceComponents);
     }
-    // TODO: is there any extreme case when we need to set component upgrade state back to NONE
-    // from IN_PROGRESS (e.g. canceled downgrade)
-    s_upgradeHelper.putComponentsToUpgradingState(version, targetComponents);
+
+    // !!! determine which stack to check for component isAdvertised
+    StackId componentStack = upgradeContext.getDirection() == Direction.UPGRADE ?
+        upgradeContext.getTargetStackId() : upgradeContext.getOriginalStackId();
+    s_upgradeHelper.putComponentsToUpgradingState(version, targetComponents, componentStack);
 
     for (UpgradeGroupHolder group : groups) {
       boolean skippable = group.skippable;

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2ba7ddc/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java
index f5a5b0c..3179bd4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java
@@ -58,7 +58,7 @@ public class StackVersionListener {
    * Logger.
    */
   private final static Logger LOG = LoggerFactory.getLogger(StackVersionListener.class);
-  private static final String UNKNOWN_VERSION = State.UNKNOWN.toString();
+  public static final String UNKNOWN_VERSION = State.UNKNOWN.toString();
 
   /**
    * Used to prevent multiple threads from trying to create host alerts

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2ba7ddc/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
index 52bf428..ee5451f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
@@ -45,6 +45,7 @@ import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
 import org.apache.ambari.server.controller.utilities.ClusterControllerHelper;
 import org.apache.ambari.server.controller.utilities.PredicateBuilder;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.apache.ambari.server.events.listeners.upgrade.StackVersionListener;
 import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.stack.HostsType;
@@ -773,19 +774,50 @@ public class UpgradeHelper {
    *          desired version (like 2.2.1.0-1234) for upgrade
    * @param targetServices
    *          targets for upgrade
+   * @param targetStack
+   *          the target stack for the components.  Express and Rolling upgrades determine
+   *          the "correct" stack differently, so the component's desired stack id is not
+   *          a reliable indicator.
    */
   @Transactional
   public void putComponentsToUpgradingState(String version,
-                                            Map<Service, Set<ServiceComponent>> targetServices) throws AmbariException {
-    // TODO: generalize method?
+      Map<Service, Set<ServiceComponent>> targetServices, StackId targetStack) throws AmbariException {
+
     for (Map.Entry<Service, Set<ServiceComponent>> entry: targetServices.entrySet()) {
       for (ServiceComponent serviceComponent: entry.getValue()) {
-        if (serviceComponent.isVersionAdvertised()) {
-          for (ServiceComponentHost serviceComponentHost: serviceComponent.getServiceComponentHosts().values()) {
-            serviceComponentHost.setUpgradeState(UpgradeState.IN_PROGRESS);
+
+        boolean versionAdvertised = false;
+        try {
+          ComponentInfo ci = m_ambariMetaInfo.get().getComponent(targetStack.getStackName(),
+              targetStack.getStackVersion(), serviceComponent.getServiceName(),
+              serviceComponent.getName());
+
+          versionAdvertised = ci.isVersionAdvertised();
+        } catch (AmbariException e) {
+          LOG.warn("Component {}/{} doesn't exist for stack {}.  Setting version to {}",
+              serviceComponent.getServiceName(), serviceComponent.getName(), targetStack,
+              StackVersionListener.UNKNOWN_VERSION);
+        }
+
+        UpgradeState upgradeState = UpgradeState.IN_PROGRESS;
+        String desiredVersion = version;
+
+        if (!versionAdvertised) {
+          upgradeState = UpgradeState.NONE;
+          desiredVersion = StackVersionListener.UNKNOWN_VERSION;
+        }
+
+        for (ServiceComponentHost serviceComponentHost: serviceComponent.getServiceComponentHosts().values()) {
+          serviceComponentHost.setUpgradeState(upgradeState);
+
+          // !!! if the version isn't advertised but a version IS recorded, reset it to UNKNOWN.
+          if (!versionAdvertised &&
+              !serviceComponentHost.getVersion().equals(StackVersionListener.UNKNOWN_VERSION)) {
+            serviceComponentHost.setVersion(StackVersionListener.UNKNOWN_VERSION);
           }
-          serviceComponent.setDesiredVersion(version);
         }
+        serviceComponent.setDesiredVersion(desiredVersion);
+
       }
     }
   }
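
Restated as a compact sketch (illustrative Python; the authoritative logic is the Java above): the provider picks which stack to consult based on direction, and a component that does not advertise a version in that stack is parked at NONE with its version pinned to UNKNOWN.

UNKNOWN_VERSION = "UNKNOWN"

def stack_to_check(direction, original_stack, target_stack):
    # Check component metadata against the target stack on upgrade and
    # against the original stack on downgrade.
    return target_stack if direction == "UPGRADE" else original_stack

def plan_component(advertises_version, requested_version):
    """Return (upgrade_state, desired_version) for one service component."""
    if advertises_version:
        return ("IN_PROGRESS", requested_version)
    # A component that stops advertising a version (or is missing from the
    # stack) is parked at NONE with its version pinned to UNKNOWN.
    return ("NONE", UNKNOWN_VERSION)

assert stack_to_check("DOWNGRADE", "HDP-2.1", "HDP-2.2") == "HDP-2.1"
assert plan_component(False, "2.1.1.0") == ("NONE", "UNKNOWN")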

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2ba7ddc/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
index a702e6f..4d95e2b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
@@ -96,6 +96,8 @@ import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.UpgradeHelper;
+import org.apache.ambari.server.state.UpgradeState;
 import org.apache.ambari.server.state.stack.UpgradePack;
 import org.apache.ambari.server.state.stack.upgrade.Direction;
 import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
@@ -1464,7 +1466,6 @@ public class UpgradeResourceProviderTest {
    */
   @Test()
   public void testCreateHostOrderedUpgradeThrowsExceptions() throws Exception {
-    Cluster cluster = clusters.getCluster("c1");
 
     Map<String, Object> requestProps = new HashMap<String, Object>();
     requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1");
@@ -1509,6 +1510,72 @@ public class UpgradeResourceProviderTest {
     upgradeResourceProvider.createResources(request);
   }
 
+  /**
+   * Exercises that a component that goes from upgrade->downgrade and switches
+   * {@code versionAdvertised} between stacks will go to UNKNOWN.  This exercises
+   * {@link UpgradeHelper#putComponentsToUpgradingState(String, Map, StackId)}
+   * @throws Exception
+   */
+  @Test
+  public void testCreateUpgradeDowngradeCycleAdvertisingVersion() throws Exception {
+    Cluster cluster = clusters.getCluster("c1");
+    Service service = cluster.addService("STORM");
+    service.setDesiredStackVersion(cluster.getDesiredStackVersion());
+
+    ServiceComponent component = service.addServiceComponent("DRPC_SERVER");
+    ServiceComponentHost sch = component.addServiceComponentHost("h1");
+    sch.setVersion("2.1.1.0");
+
+    ResourceProvider upgradeResourceProvider = createProvider(amc);
+
+    Map<String, Object> requestProps = new HashMap<>();
+    requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1");
+    requestProps.put(UpgradeResourceProvider.UPGRADE_VERSION, "2.2.0.0");
+    requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test");
+    requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true");
+    requestProps.put(UpgradeResourceProvider.UPGRADE_FROM_VERSION, "2.1.1.0");
+    requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name());
+
+    Map<String, String> requestInfoProperties = new HashMap<>();
+
+    Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), requestInfoProperties);
+
+    RequestStatus status = upgradeResourceProvider.createResources(request);
+    assertEquals(1, status.getAssociatedResources().size());
+
+    Resource r = status.getAssociatedResources().iterator().next();
+    String id = r.getPropertyValue("Upgrade/request_id").toString();
+
+    component = service.getServiceComponent("DRPC_SERVER");
+    assertNotNull(component);
+    assertEquals("2.2.0.0", component.getDesiredVersion());
+
+    ServiceComponentHost hostComponent = component.getServiceComponentHost("h1");
+    assertEquals(UpgradeState.IN_PROGRESS, hostComponent.getUpgradeState());
+
+    // !!! can't start a downgrade until cancelling the previous upgrade
+    abortUpgrade(Long.parseLong(id));
+
+    requestProps.clear();
+    requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1");
+    requestProps.put(UpgradeResourceProvider.UPGRADE_VERSION, "2.1.1.0");
+    requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test");
+    requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true");
+    requestProps.put(UpgradeResourceProvider.UPGRADE_FROM_VERSION, "2.2.0.0");
+    requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.DOWNGRADE.name());
+
+    request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), requestInfoProperties);
+    status = upgradeResourceProvider.createResources(request);
+
+    component = service.getServiceComponent("DRPC_SERVER");
+    assertNotNull(component);
+    assertEquals("UNKNOWN", component.getDesiredVersion());
+
+    hostComponent = component.getServiceComponentHost("h1");
+    assertEquals(UpgradeState.NONE, hostComponent.getUpgradeState());
+    assertEquals("UNKNOWN", hostComponent.getVersion());
+  }
+
 
   private String parseSingleMessage(String msgStr){
     JsonParser parser = new JsonParser();

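The new test drives a full upgrade/abort/downgrade cycle for a component whose versionAdvertised flag differs between the two stacks. A condensed, runnable Java sketch of the state transitions it asserts — using placeholder types and methods, not the Ambari resource provider API:

public class UpgradeCycleSketch {
  enum UpgradeState { NONE, IN_PROGRESS }

  static final String UNKNOWN = "UNKNOWN";

  UpgradeState state = UpgradeState.NONE;
  String desiredVersion = "2.1.1.0";
  String hostVersion = "2.1.1.0";
  boolean upgradeActive = false;

  // the upgrade target (HDP 2.2.0) advertises a version for DRPC_SERVER
  void startUpgrade(String target) {
    state = UpgradeState.IN_PROGRESS;
    desiredVersion = target;
    upgradeActive = true;
  }

  void abortUpgrade() { upgradeActive = false; }

  // the downgrade target (HDP 2.1.1) does not advertise a version
  void startDowngrade() {
    if (upgradeActive) throw new IllegalStateException("abort the upgrade first");
    state = UpgradeState.NONE;
    desiredVersion = UNKNOWN;
    hostVersion = UNKNOWN;
  }

  public static void main(String[] args) {
    UpgradeCycleSketch drpc = new UpgradeCycleSketch();
    drpc.startUpgrade("2.2.0.0");   // IN_PROGRESS, desired version 2.2.0.0
    drpc.abortUpgrade();            // a downgrade cannot start until this happens
    drpc.startDowngrade();          // NONE, desired UNKNOWN, host version UNKNOWN
    System.out.println(drpc.state + " " + drpc.desiredVersion + " " + drpc.hostVersion);
  }
}
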
http://git-wip-us.apache.org/repos/asf/ambari/blob/b2ba7ddc/ambari-server/src/test/resources/stacks/HDP/2.1.1/services/STORM/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.1.1/services/STORM/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.1.1/services/STORM/metainfo.xml
index 79a3130..d63aa1b 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.1.1/services/STORM/metainfo.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.1.1/services/STORM/metainfo.xml
@@ -72,6 +72,7 @@
           <name>DRPC_SERVER</name>
           <displayName>DRPC Server</displayName>
           <category>MASTER</category>
+          <versionAdvertised>false</versionAdvertised>
           <commandScript>
             <script>scripts/drpc_server.py</script>
             <scriptType>PYTHON</scriptType>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b2ba7ddc/ambari-server/src/test/resources/stacks/HDP/2.2.0/services/STORM/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.2.0/services/STORM/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.2.0/services/STORM/metainfo.xml
new file mode 100644
index 0000000..6075ba3
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.2.0/services/STORM/metainfo.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>STORM</name>
+      <comment>Apache Hadoop Stream processing framework</comment>
+      <version>0.9.0.1</version>
+      <components>
+
+        <component>
+          <name>DRPC_SERVER</name>
+          <displayName>DRPC Server</displayName>
+          <category>MASTER</category>
+          <versionAdvertised>true</versionAdvertised>
+        </component>
+      </components>
+    </service>
+  </services>
+</metainfo>        
\ No newline at end of file


[04/50] [abbrv] ambari git commit: AMBARI-19563. Dashboard-Show YARN RM URL in actions and flow graph and organize tabs. (Padma Priya N via gauravn7)

Posted by nc...@apache.org.
AMBARI-19563. Dashboard-Show YARN RM URL in actions and flow graph and organize tabs. (Padma Priya N via gauravn7)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b4bb42a7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b4bb42a7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b4bb42a7

Branch: refs/heads/branch-dev-patch-upgrade
Commit: b4bb42a7dbbe08287d09cec6cef64f43f0d43f2f
Parents: 684c9e6
Author: Gaurav Nagar <gr...@gmail.com>
Authored: Tue Jan 17 14:55:45 2017 +0530
Committer: Gaurav Nagar <gr...@gmail.com>
Committed: Tue Jan 17 14:55:45 2017 +0530

----------------------------------------------------------------------
 .../resources/ui/app/components/job-details.js  | 29 +++++++++++++++++---
 .../ui/app/components/workflow-job-details.js   |  3 ++
 .../ui/app/templates/components/job-details.hbs | 20 +++++++-------
 .../components/workflow-job-action-info.hbs     |  8 ------
 .../components/workflow-job-details.hbs         | 14 ++++++++++
 5 files changed, 52 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b4bb42a7/contrib/views/wfmanager/src/main/resources/ui/app/components/job-details.js
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/job-details.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/job-details.js
index 32f5ef4..91e1ce3 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/job-details.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/job-details.js
@@ -329,7 +329,21 @@ export default Ember.Component.extend({
         this.sendAction('close');
       },
       doRefresh : function(){
-        this.sendAction('doRefresh');
+        var tab = this.$(this.get('currentTab')).prop('href');
+        var currentTabHref = tab.substr(tab.indexOf('#')+1);
+        if(currentTabHref === 'jobLog'){
+          this.send('getJobLog', this.get('logParams'));
+        }else if(currentTabHref === 'jobErrorLog'){
+          this.send('getErrorLog');
+        }else if(currentTabHref === 'jobAuditLog'){
+          this.send('getAuditLog');
+        }else if(currentTabHref === 'jobDag'){
+          this.send('getJobDag');
+        }else if(currentTabHref === 'coordActionReruns'){
+          this.send('getCoordActionReruns');
+        }else{
+          this.sendAction('doRefresh');
+        }
       },
       getJobDefinition : function () {
         Ember.$.get(Ember.ENV.API_URL+'/v2/job/'+this.get('id')+'?show=definition&timezone=GMT',function(response){
@@ -342,6 +356,7 @@ export default Ember.Component.extend({
         this.set('model.actionDetails', this.get('model.actions')[0]);
       },
       getJobLog : function (params){
+        this.set('logParams', params);
         var url = Ember.ENV.API_URL+'/v2/job/'+this.get('id')+'?show=log';
         if(params && params.logFilter){
           url = url + '&logfilter=' + params.logFilter;
@@ -350,7 +365,9 @@ export default Ember.Component.extend({
           url = url + '&type=action&scope='+ params.logActionList;
         }
         Ember.$.get(url,function(response){
-          response = response.trim().length > 0 ? response : "No messages present";
+          if(Ember.isBlank(response)){
+            response = 'No Logs';
+          }
           this.set('model.jobLog', response);
         }.bind(this)).fail(function(error){
           this.set('error', error);
@@ -358,7 +375,9 @@ export default Ember.Component.extend({
       },
       getErrorLog : function (){
         Ember.$.get(Ember.ENV.API_URL+'/v2/job/'+this.get('id')+'?show=errorlog',function(response){
-          response = response.trim().length > 0 ? response : "No messages present";
+          if(Ember.isBlank(response)){
+            response = 'No Errors';
+          }
           this.set('model.errorLog', response);
         }.bind(this)).fail(function(error){
           this.set('error', error);
@@ -366,7 +385,9 @@ export default Ember.Component.extend({
       },
       getAuditLog : function (){
         Ember.$.get(Ember.ENV.API_URL+'/v2/job/'+this.get('id')+'?show=auditlog',function(response){
-          response = response.trim().length > 0 ? response : "No messages present";
+          if(Ember.isBlank(response)){
+            response = 'No Logs';
+          }
           this.set('model.auditLog', response);
         }.bind(this)).fail(function(error){
           this.set('error', error);

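The doRefresh change above replaces a single delegated refresh with per-tab dispatch. The same pattern can be sketched with a lookup map instead of an if/else chain; this Java sketch is illustrative only (tab ids are taken from the template, the actions are stand-ins):

import java.util.Map;

public class TabRefreshDispatch {
  // each tab id maps to the action that reloads just that tab's data
  static final Map<String, Runnable> REFRESHERS = Map.of(
      "jobLog",            () -> System.out.println("reload job log"),
      "jobErrorLog",       () -> System.out.println("reload error log"),
      "jobAuditLog",       () -> System.out.println("reload audit log"),
      "jobDag",            () -> System.out.println("reload flow graph"),
      "coordActionReruns", () -> System.out.println("reload action reruns"));

  static void doRefresh(String currentTab) {
    // unknown tabs fall back to a full refresh, like the final else branch above
    REFRESHERS.getOrDefault(currentTab, () -> System.out.println("full refresh")).run();
  }

  public static void main(String[] args) {
    doRefresh("jobLog");
    doRefresh("jobInfo"); // falls through to the full refresh
  }
}
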
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4bb42a7/contrib/views/wfmanager/src/main/resources/ui/app/components/workflow-job-details.js
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/workflow-job-details.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/workflow-job-details.js
index 4873a31..7a868fa 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/workflow-job-details.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/workflow-job-details.js
@@ -26,6 +26,9 @@ export default Ember.Component.extend({
     },
     getActionDetails(action){
       this.sendAction('getActionDetails',action);
+    },
+    openConsoleUrl(url){
+      window.open(url, '_blank');
     }
   }
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4bb42a7/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-details.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-details.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-details.hbs
index 32e468f..c783ad0 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-details.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-details.hbs
@@ -31,19 +31,19 @@
           {{#if fromBundleId}}
             <li class="breadcrumb-item">
               <a href="#" {{action 'back' 'bundles' fromBundleId}}>
-                {{#workflow-icon workflowType="bundles"}}{{/workflow-icon}} [ <span title="{{fromBundleId}}">{{dashboardContext.currentBundleName}}</span> ]
+                {{#workflow-icon workflowType="bundles"}}{{/workflow-icon}}  <span title="{{fromBundleId}}">{{dashboardContext.currentBundleName}}</span> 
               </a>
             </li>
           {{/if}}
           {{#if fromCoordId}}
             <li class="breadcrumb-item">
               <a href="#" {{action 'back' 'coords' fromCoordId}}>
-                {{#workflow-icon workflowType="coords"}}{{/workflow-icon}} [ <span title="{{fromCoordId}}">{{dashboardContext.currentCoordName}}</span> ]
+                {{#workflow-icon workflowType="coords"}}{{/workflow-icon}}  <span title="{{fromCoordId}}">{{dashboardContext.currentCoordName}}</span>
               </a>
             </li>
           {{/if}}
           <li class="breadcrumb-item">
-            {{#workflow-icon workflowType=model.jobType}}{{/workflow-icon}} [ <span title="{{id}}">{{name}}</span> ]
+            {{#workflow-icon workflowType=model.jobType}}{{/workflow-icon}}  <span title="{{id}}">{{name}}</span>
           </li>
         </ol>
       </div>
@@ -60,12 +60,12 @@
           <ul class="nav nav-tabs" role="tablist">
             <li role="presentation" class="active"><a href="#jobInfo" aria-controls="job-info" role="tab" data-toggle="tab">Info</a></li>
             <li {{action 'showFirstActionDetail'}} role="presentation"><a href="#jobAction" aria-controls="job-action" role="tab" data-toggle="tab">Action</a></li>
-            <li {{action 'getJobDefinition'}} role="presentation"><a href="#jobDefinition" aria-controls="jobDefinition" role="tab" data-toggle="tab">Definition</a></li>
-            <li role="presentation"><a href="#jobConfig" aria-controls="jobConfig" role="tab" data-toggle="tab">Configuration</a></li>
+            <li {{action 'getJobDag'}} role="presentation"><a href="#jobDag" aria-controls="jobDag" role="tab" data-toggle="tab">Flow Graph</a></li>
             <li {{action 'getJobLog'}} role="presentation"><a href="#jobLog" aria-controls="jobLog" role="tab" data-toggle="tab">Log</a></li>
             <li {{action 'getErrorLog'}} role="presentation"><a href="#jobErrorLog" aria-controls="jobErrorLog" role="tab" data-toggle="tab">Error Log</a></li>
             <li {{action 'getAuditLog'}} role="presentation"><a href="#jobAuditLog" aria-controls="jobAuditLog" role="tab" data-toggle="tab">Audit Log</a></li>
-            <li {{action 'getJobDag'}} role="presentation"><a href="#jobDag" aria-controls="jobDag" role="tab" data-toggle="tab">Flow Graph</a></li>
+            <li role="presentation"><a href="#jobConfig" aria-controls="jobConfig" role="tab" data-toggle="tab">Configuration</a></li>
+            <li {{action 'getJobDefinition'}} role="presentation"><a href="#jobDefinition" aria-controls="jobDefinition" role="tab" data-toggle="tab">Definition</a></li>
             <li role="presentation" class="pull-right">
 				<button type="button" class="btn btn-success" title="Edit Workflow" {{action "editWorkflow" model.appPath}}>
 				   Edit Workflow
@@ -81,12 +81,12 @@
           <ul class="nav nav-tabs" role="tablist">
             <li role="presentation" class="active"><a href="#jobInfo" aria-controls="job-info" role="tab" data-toggle="tab">Info</a></li>
             <li {{action 'showFirstActionDetail'}} role="presentation"><a href="#jobAction" aria-controls="job-action" role="tab" data-toggle="tab">Workflow Jobs</a></li>
-            <li {{action 'getJobDefinition'}} role="presentation"><a href="#jobDefinition" aria-controls="jobDefinition" role="tab" data-toggle="tab">Definition</a></li>
-            <li role="presentation"><a href="#jobConfig" aria-controls="jobConfig" role="tab" data-toggle="tab">Configuration</a></li>
             <li {{action 'getJobLog'}} role="presentation"><a href="#jobLog" aria-controls="jobLog" role="tab" data-toggle="tab">Log</a></li>
             <li {{action 'getErrorLog'}} role="presentation"><a href="#jobErrorLog" aria-controls="jobErrorLog" role="tab" data-toggle="tab">Error Log</a></li>
             <li {{action 'getAuditLog'}} role="presentation"><a href="#jobAuditLog" aria-controls="jobAuditLog" role="tab" data-toggle="tab">Audit Log</a></li>
             <li {{action 'getCoordActionReruns'}} role="presentation"><a href="#coordActionReruns" aria-controls="coordActionReruns" role="tab" data-toggle="tab">Action Reruns</a></li>
+            <li role="presentation"><a href="#jobConfig" aria-controls="jobConfig" role="tab" data-toggle="tab">Configuration</a></li>
+            <li {{action 'getJobDefinition'}} role="presentation"><a href="#jobDefinition" aria-controls="jobDefinition" role="tab" data-toggle="tab">Definition</a></li>
           </ul>
         </div>
       {{/coord-job-details}}
@@ -96,11 +96,11 @@
           <ul class="nav nav-tabs" role="tablist">
             <li role="presentation" class="active"><a href="#jobInfo" aria-controls="job-info" role="tab" data-toggle="tab">Info</a></li>
             <li role="presentation"><a href="#coordJobs" aria-controls="coord-jobs" role="tab" data-toggle="tab">Coord Jobs</a></li>
-            <li {{action 'getJobDefinition'}} role="presentation"><a href="#jobDefinition" aria-controls="jobDefinition" role="tab" data-toggle="tab">Definition</a></li>
-            <li role="presentation"><a href="#jobConfig" aria-controls="jobConfig" role="tab" data-toggle="tab">Configuration</a></li>
             <li {{action 'getJobLog'}} role="presentation"><a href="#jobLog" aria-controls="jobLog" role="tab" data-toggle="tab">Log</a></li>
             <li {{action 'getErrorLog'}} role="presentation"><a href="#jobErrorLog" aria-controls="jobErrorLog" role="tab" data-toggle="tab">Error Log</a></li>
             <li {{action 'getAuditLog'}} role="presentation"><a href="#jobAuditLog" aria-controls="jobAuditLog" role="tab" data-toggle="tab">Audit Log</a></li>
+            <li role="presentation"><a href="#jobConfig" aria-controls="jobConfig" role="tab" data-toggle="tab">Configuration</a></li>
+            <li {{action 'getJobDefinition'}} role="presentation"><a href="#jobDefinition" aria-controls="jobDefinition" role="tab" data-toggle="tab">Definition</a></li>
           </ul>
         </div>
       {{/bundle-job-details}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4bb42a7/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/workflow-job-action-info.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/workflow-job-action-info.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/workflow-job-action-info.hbs
index 8d1e09c..51986ce 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/workflow-job-action-info.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/workflow-job-action-info.hbs
@@ -60,14 +60,6 @@
         <div class="col-md-10">{{actionInfo.externalStatus}}</div>
       </div>
       <div class="col-md-12">
-        <div class="col-md-2">Console URL</div>
-        {{#unless (eq "-" actionInfo.consoleUrl)}}
-        <div class="col-md-10"><a target="_blank" href="{{actionInfo.consoleUrl}}">{{actionInfo.consoleUrl}}</a></div>
-        {{else}}
-        <div class="col-md-10">{{actionInfo.consoleUrl}}</div>
-        {{/unless}}
-      </div>
-      <div class="col-md-12">
         <div class="col-md-2">Tracker URI</div>
         {{#unless (eq "-" actionInfo.trackerUri)}}
         <div class="col-md-10"><a target="_blank" href="{{actionInfo.trackerUri}}">{{actionInfo.trackerUri}}</a></div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4bb42a7/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/workflow-job-details.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/workflow-job-details.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/workflow-job-details.hbs
index 114ca37..89997aa 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/workflow-job-details.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/workflow-job-details.hbs
@@ -79,6 +79,7 @@
           <th>Transition</th>
           <th>Start Time</th>
           <th>End Time</th>
+          <th>Job URL</th>
         </tr>
       </thead>
       <tbody>
@@ -90,6 +91,11 @@
           <td>{{actionInfo.transition}}</td>
           <td>{{actionInfo.startTime}}</td>
           <td>{{actionInfo.endTime}}</td>
+          {{#unless (eq "-" actionInfo.consoleUrl)}}
+            <td><a target="_blank" href="#" {{action 'openConsoleUrl'  actionInfo.consoleUrl preventDefault=true}}><i class="fa fa-external-link" aria-hidden="true"></i></a></td>
+          {{else}}
+            <td>{{actionInfo.consoleUrl}}</td>
+          {{/unless}}
         </tr>
         {{/each}}
       </tbody>
@@ -219,6 +225,14 @@
                   <div class="col-md-4 text-bold">End Time</div>
                   <div class="col-md-8">{{model.actionInfo.endTime}}</div>
                 </div>
+                <div class="row">
+                  <div class="col-md-4 text-bold">External ID</div>
+                  <div class="col-md-8">{{model.actionInfo.externalId}}</div>
+                </div>
+                <div class="row">
+                  <div class="col-md-4 text-bold">Job URL</div>
+                  <div class="col-md-8"><a target="_blank" href="{{model.actionInfo.consoleUrl}}"><i class="fa fa-external-link" aria-hidden="true"></i></a></div>
+                </div>
                 <div class="row pull-right" {{action 'getActionDetails' model.actionInfo}}>
                   <div class="col-md-12">
                     <a data-toggle="modal" href="#" data-target="#actionDetailsModal">More</a>


[45/50] [abbrv] ambari git commit: Revert "AMBARI-19331. Setup correct authentication and authorization mechanism between Yarn and Zookeeper (Attila Magyar via rlevas)"

Posted by nc...@apache.org.
Revert "AMBARI-19331. Setup correct authentication and authorization mechanism between Yarn and Zookeeper (Attila Magyar via rlevas)"

This reverts commit e96dee0fecfafeda637c339217b2746b337f729f.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ed93a562
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ed93a562
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ed93a562

Branch: refs/heads/branch-dev-patch-upgrade
Commit: ed93a5628453265943dcde5d1abbea1130376174
Parents: 189fae5
Author: Toader, Sebastian <st...@hortonworks.com>
Authored: Wed Jan 18 11:00:40 2017 +0100
Committer: Toader, Sebastian <st...@hortonworks.com>
Committed: Wed Jan 18 11:00:40 2017 +0100

----------------------------------------------------------------------
 .../YARN/2.1.0.2.0/package/scripts/params_linux.py               | 2 --
 .../YARN/2.1.0.2.0/package/scripts/resourcemanager.py            | 2 --
 .../main/resources/common-services/YARN/3.0.0.3.0/kerberos.json  | 4 +---
 .../YARN/3.0.0.3.0/package/scripts/params_linux.py               | 2 --
 .../YARN/3.0.0.3.0/package/scripts/resourcemanager.py            | 2 --
 .../main/resources/stacks/HDP/2.2/services/YARN/kerberos.json    | 4 +---
 .../resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json     | 4 +---
 .../main/resources/stacks/HDP/2.3/services/YARN/kerberos.json    | 4 +---
 .../main/resources/stacks/HDP/2.5/services/YARN/kerberos.json    | 4 +---
 9 files changed, 5 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ed93a562/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
index 653fa0a..7df82bf 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
@@ -256,8 +256,6 @@ nodemanager_kinit_cmd = ""
 rm_zk_address = config['configurations']['yarn-site']['yarn.resourcemanager.zk-address']
 rm_zk_znode = config['configurations']['yarn-site']['yarn.resourcemanager.zk-state-store.parent-path']
 rm_zk_store_class = config['configurations']['yarn-site']['yarn.resourcemanager.store.class']
-rm_zk_failover_znode = default('/configurations/yarn-site/yarn.resourcemanager.ha.automatic-failover.zk-base-path', '/yarn-leader-election')
-hadoop_registry_zk_root = default('/configurations/yarn-site/hadoop.registry.zk.root', '/registry')
 
 if security_enabled:
   rm_principal_name = config['configurations']['yarn-site']['yarn.resourcemanager.principal']

http://git-wip-us.apache.org/repos/asf/ambari/blob/ed93a562/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
index f2e6660..77bd363 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
@@ -238,8 +238,6 @@ class ResourcemanagerDefault(Resourcemanager):
       params.yarn_jaas_file, \
       params.yarn_user)
     zkmigrator.set_acls(params.rm_zk_znode, 'world:anyone:crdwa')
-    zkmigrator.set_acls(params.rm_zk_failover_znode, 'world:anyone:crdwa')
-    zkmigrator.set_acls(params.hadoop_registry_zk_root, 'world:anyone:crdwa')
 
   def wait_for_dfs_directories_created(self, *dirs):
     import params

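For context, the remaining set_acls call grants the 'world:anyone:crdwa' ACL on the RM state-store znode. At the ZooKeeper client level that corresponds to the following call; a minimal Java sketch, where the connection string and znode path are placeholders (in Ambari they come from yarn.resourcemanager.zk-address and yarn.resourcemanager.zk-state-store.parent-path):

import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeAclSketch {
  public static void main(String[] args) throws Exception {
    // placeholder connection string for a local ZooKeeper
    ZooKeeper zk = new ZooKeeper("localhost:2181", 30000, event -> { });
    try {
      // OPEN_ACL_UNSAFE is the world:anyone scheme with create/read/delete/write/admin,
      // i.e. the 'world:anyone:crdwa' string passed to zkmigrator.set_acls above
      zk.setACL("/rmstore", ZooDefs.Ids.OPEN_ACL_UNSAFE, -1);
    } finally {
      zk.close();
    }
  }
}
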
http://git-wip-us.apache.org/repos/asf/ambari/blob/ed93a562/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
index fb85e7a..eaffec6 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
@@ -32,9 +32,7 @@
             "yarn.resourcemanager.proxyuser.*.hosts": "",
             "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
-            "hadoop.registry.secure" : "true",
-            "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"
+            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda"
           }
         },
         {

http://git-wip-us.apache.org/repos/asf/ambari/blob/ed93a562/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
index 236b132..96b227b 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/params_linux.py
@@ -255,8 +255,6 @@ nodemanager_kinit_cmd = ""
 rm_zk_address = config['configurations']['yarn-site']['yarn.resourcemanager.zk-address']
 rm_zk_znode = config['configurations']['yarn-site']['yarn.resourcemanager.zk-state-store.parent-path']
 rm_zk_store_class = config['configurations']['yarn-site']['yarn.resourcemanager.store.class']
-rm_zk_failover_znode = default('/configurations/yarn-site/yarn.resourcemanager.ha.automatic-failover.zk-base-path', '/yarn-leader-election')
-hadoop_registry_zk_root = default('/configurations/yarn-site/hadoop.registry.zk.root', '/registry')
 
 if security_enabled:
   rm_principal_name = config['configurations']['yarn-site']['yarn.resourcemanager.principal']

http://git-wip-us.apache.org/repos/asf/ambari/blob/ed93a562/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
index 91d7b89..657a020 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/package/scripts/resourcemanager.py
@@ -122,8 +122,6 @@ class ResourcemanagerDefault(Resourcemanager):
       params.yarn_jaas_file, \
       params.yarn_user)
     zkmigrator.set_acls(params.rm_zk_znode, 'world:anyone:crdwa')
-    zkmigrator.set_acls(params.rm_zk_failover_znode, 'world:anyone:crdwa')
-    zkmigrator.set_acls(params.hadoop_registry_zk_root, 'world:anyone:crdwa')
 
   def start(self, env, upgrade_type=None):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/ed93a562/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
index 85a3221..a8ef83c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
@@ -32,9 +32,7 @@
             "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore-secure",
-            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
-            "hadoop.registry.secure" : "true",
-            "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"
+            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda"
           }
         },
         {

http://git-wip-us.apache.org/repos/asf/ambari/blob/ed93a562/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
index e27513a..3059f14 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
@@ -34,9 +34,7 @@
             "yarn.resourcemanager.proxyuser.*.hosts": "",
             "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
-            "hadoop.registry.secure" : "true",
-            "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"
+            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda"
           }
         },
         {

http://git-wip-us.apache.org/repos/asf/ambari/blob/ed93a562/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
index bf0280b..5fff05c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
@@ -32,9 +32,7 @@
             "yarn.resourcemanager.proxyuser.*.hosts": "",
             "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
-            "hadoop.registry.secure" : "true",
-            "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"
+            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda"
           }
         },
         {

http://git-wip-us.apache.org/repos/asf/ambari/blob/ed93a562/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
index fb85e7a..eaffec6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
@@ -32,9 +32,7 @@
             "yarn.resourcemanager.proxyuser.*.hosts": "",
             "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
-            "hadoop.registry.secure" : "true",
-            "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"
+            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda"
           }
         },
         {


[11/50] [abbrv] ambari git commit: AMBARI-19044 Install & configure Ranger plugin components independently of Ranger admin components (mugdha)

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.5/services/KNOX/configuration/ranger-knox-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/KNOX/configuration/ranger-knox-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/KNOX/configuration/ranger-knox-plugin-properties.xml
new file mode 100644
index 0000000..3450970
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/KNOX/configuration/ranger-knox-plugin-properties.xml
@@ -0,0 +1,71 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+
+  <property>
+    <name>external_admin_username</name>
+    <value></value>
+    <display-name>External Ranger admin username</display-name>
+    <description>Add the default Ranger admin username if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_admin_password</name>
+    <value></value>
+    <display-name>External Ranger admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Add the default Ranger admin password if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_username</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin username</display-name>
+    <description>Add the default Ranger Ambari admin username if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_password</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Add the default Ranger Ambari admin password if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.5/services/STORM/configuration/ranger-storm-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/STORM/configuration/ranger-storm-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/STORM/configuration/ranger-storm-policymgr-ssl.xml
index 5672f04..21658e7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/STORM/configuration/ranger-storm-policymgr-ssl.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/STORM/configuration/ranger-storm-policymgr-ssl.xml
@@ -23,12 +23,12 @@
     <name>xasecure.policymgr.clientssl.keystore</name>
     <value>/usr/hdp/current/storm-client/conf/ranger-plugin-keystore.jks</value>
     <description>Java Keystore files</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore</name>
     <value>/usr/hdp/current/storm-client/conf/ranger-plugin-truststore.jks</value>
     <description>java truststore file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.5/services/STORM/configuration/ranger-storm-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/STORM/configuration/ranger-storm-security.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/STORM/configuration/ranger-storm-security.xml
index f3d7530..8a3dd2e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/STORM/configuration/ranger-storm-security.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/STORM/configuration/ranger-storm-security.xml
@@ -23,6 +23,6 @@
     <name>ranger.plugin.storm.policy.rest.ssl.config.file</name>
     <value>/usr/hdp/current/storm-client/conf/ranger-policymgr-ssl.xml</value>
     <description>Path to the file containing SSL details to contact Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/configuration/ranger-yarn-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/configuration/ranger-yarn-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/configuration/ranger-yarn-plugin-properties.xml
new file mode 100644
index 0000000..3450970
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/configuration/ranger-yarn-plugin-properties.xml
@@ -0,0 +1,71 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+
+  <property>
+    <name>external_admin_username</name>
+    <value></value>
+    <display-name>External Ranger admin username</display-name>
+    <description>Add the default Ranger admin username if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_admin_password</name>
+    <value></value>
+    <display-name>External Ranger admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Add the default Ranger admin password if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_username</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin username</display-name>
+    <description>Add the default Ranger Ambari admin username if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_password</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Add the default Ranger Ambari admin password if you want to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index 2ad35a2..afe9fea 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -1978,6 +1978,13 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
         putAtlasRangerAuditProperty('xasecure.audit.destination.hdfs',xasecure_audit_destination_hdfs)
         putAtlasRangerAuditProperty('xasecure.audit.destination.hdfs.dir',xasecure_audit_destination_hdfs_dir)
         putAtlasRangerAuditProperty('xasecure.audit.destination.solr',xasecure_audit_destination_solr)
+    required_services = [
+      {'service_name': 'ATLAS', 'config_type': 'ranger-atlas-security'}
+    ]
+
+    # recommendation for ranger url for ranger-supported plugins
+    self.recommendRangerUrlConfigurations(configurations, services, required_services)
+
 
   def validateRangerTagsyncConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     ranger_tagsync_properties = properties

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
index 1830f24..484a09a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
@@ -284,6 +284,7 @@ public class UpgradeCatalog250Test {
     Method updateAtlasConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateAtlasConfigs");
     Method updateLogSearchConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateLogSearchConfigs");
     Method updateAmbariInfraConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateAmbariInfraConfigs");
+    Method updateRangerUrlConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateRangerUrlConfigs");
 
     UpgradeCatalog250 upgradeCatalog250 = createMockBuilder(UpgradeCatalog250.class)
         .addMockedMethod(updateAmsConfigs)
@@ -298,6 +299,7 @@ public class UpgradeCatalog250Test {
         .addMockedMethod(updateAtlasConfigs)
         .addMockedMethod(updateLogSearchConfigs)
         .addMockedMethod(updateAmbariInfraConfigs)
+        .addMockedMethod(updateRangerUrlConfigs)
         .createMock();
 
     upgradeCatalog250.updateAMSConfigs();
@@ -333,6 +335,9 @@ public class UpgradeCatalog250Test {
     upgradeCatalog250.updateAmbariInfraConfigs();
     expectLastCall().once();
 
+    upgradeCatalog250.updateRangerUrlConfigs();
+    expectLastCall().once();
+
     upgradeCatalog250.addManageServiceAutoStartPermissions();
     expectLastCall().once();
 
@@ -1128,4 +1133,109 @@ public class UpgradeCatalog250Test {
     Assert.assertTrue(clusterAdministratorAuthorizations.contains(clusterRunCustomCommandEntity));
   }
 
+  @Test
+  public void testUpdateRangerUrlConfigs() throws Exception {
+    Map<String, String> oldHdfsProperties = new HashMap<String, String>();
+    Map<String, String> newHdfsProperties = new HashMap<String, String>();
+    oldHdfsProperties.put("ranger.plugin.hdfs.policy.rest.url", "{{policymgr_mgr_url}}");
+    newHdfsProperties.put("ranger.plugin.hdfs.policy.rest.url", "http://localhost:6080");
+    testUpdateRangerUrl(oldHdfsProperties, newHdfsProperties, "ranger-hdfs-security");
+
+    Map<String, String> oldHiveProperties = new HashMap<String, String>();
+    Map<String, String> newHiveProperties = new HashMap<String, String>();
+    oldHiveProperties.put("ranger.plugin.hive.policy.rest.url", "{{policymgr_mgr_url}}");
+    newHiveProperties.put("ranger.plugin.hive.policy.rest.url", "http://localhost:6080");
+    testUpdateRangerUrl(oldHiveProperties, newHiveProperties, "ranger-hive-security");
+
+    Map<String, String> oldHbaseProperties = new HashMap<String, String>();
+    Map<String, String> newHbaseProperties = new HashMap<String, String>();
+    oldHbaseProperties.put("ranger.plugin.hbase.policy.rest.url", "{{policymgr_mgr_url}}");
+    newHbaseProperties.put("ranger.plugin.hbase.policy.rest.url", "http://localhost:6080");
+    testUpdateRangerUrl(oldHbaseProperties, newHbaseProperties, "ranger-hbase-security");
+
+    Map<String, String> oldKnoxProperties = new HashMap<String, String>();
+    Map<String, String> newKnoxProperties = new HashMap<String, String>();
+    oldKnoxProperties.put("ranger.plugin.knox.policy.rest.url", "{{policymgr_mgr_url}}");
+    newKnoxProperties.put("ranger.plugin.knox.policy.rest.url", "http://localhost:6080");
+    testUpdateRangerUrl(oldKnoxProperties, newKnoxProperties, "ranger-knox-security");
+
+    Map<String, String> oldStormProperties = new HashMap<String, String>();
+    Map<String, String> newStormProperties = new HashMap<String, String>();
+    oldStormProperties.put("ranger.plugin.storm.policy.rest.url", "{{policymgr_mgr_url}}");
+    newStormProperties.put("ranger.plugin.storm.policy.rest.url", "http://localhost:6080");
+    testUpdateRangerUrl(oldStormProperties, newStormProperties, "ranger-storm-security");
+
+    Map<String, String> oldYarnProperties = new HashMap<String, String>();
+    Map<String, String> newYarnProperties = new HashMap<String, String>();
+    oldYarnProperties.put("ranger.plugin.yarn.policy.rest.url", "{{policymgr_mgr_url}}");
+    newYarnProperties.put("ranger.plugin.yarn.policy.rest.url", "http://localhost:6080");
+    testUpdateRangerUrl(oldYarnProperties, newYarnProperties, "ranger-yarn-security");
+
+    Map<String, String> oldKafkaProperties = new HashMap<String, String>();
+    Map<String, String> newKafkaProperties = new HashMap<String, String>();
+    oldKafkaProperties.put("ranger.plugin.kafka.policy.rest.url", "{{policymgr_mgr_url}}");
+    newKafkaProperties.put("ranger.plugin.kafka.policy.rest.url", "http://localhost:6080");
+    testUpdateRangerUrl(oldKafkaProperties, newKafkaProperties, "ranger-kafka-security");
+
+    Map<String, String> oldAtlasProperties = new HashMap<String, String>();
+    Map<String, String> newAtlasProperties = new HashMap<String, String>();
+    oldAtlasProperties.put("ranger.plugin.atlas.policy.rest.url", "{{policymgr_mgr_url}}");
+    newAtlasProperties.put("ranger.plugin.atlas.policy.rest.url", "http://localhost:6080");
+    testUpdateRangerUrl(oldAtlasProperties, newAtlasProperties, "ranger-atlas-security");
+
+    Map<String, String> oldKmsProperties = new HashMap<String, String>();
+    Map<String, String> newKmsProperties = new HashMap<String, String>();
+    oldKmsProperties.put("ranger.plugin.kms.policy.rest.url", "{{policymgr_mgr_url}}");
+    newKmsProperties.put("ranger.plugin.kms.policy.rest.url", "http://localhost:6080");
+    testUpdateRangerUrl(oldKmsProperties, newKmsProperties, "ranger-kms-security");
+  }
+
+  public void testUpdateRangerUrl(Map<String, String> oldProperties, Map<String, String> newProperties, String configType) throws Exception {
+    Map<String, String> adminProperties = new HashMap<String, String>() {
+      {
+        put("policymgr_external_url", "http://localhost:6080");
+      }
+    };
+
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+
+    reset(clusters, cluster);
+
+    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", cluster);
+    }}).once();
+
+    Config mockRangerPluginConfig = easyMockSupport.createNiceMock(Config.class);
+    Config mockRangerAdminProperties = easyMockSupport.createNiceMock(Config.class);
+
+    expect(cluster.getDesiredConfigByType("admin-properties")).andReturn(mockRangerAdminProperties).anyTimes();
+    expect(mockRangerAdminProperties.getProperties()).andReturn(adminProperties).anyTimes();
+
+    expect(cluster.getDesiredConfigByType(configType)).andReturn(mockRangerPluginConfig).anyTimes();
+    expect(mockRangerPluginConfig.getProperties()).andReturn(oldProperties).anyTimes();
+
+    replay(clusters, mockRangerPluginConfig, mockRangerAdminProperties, cluster);
+
+    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
+    .addMockedMethod("createConfiguration")
+    .addMockedMethod("getClusters", new Class[] { })
+    .addMockedMethod("createConfig")
+    .withConstructor(actionManager, clusters, injector)
+    .createNiceMock();
+
+    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
+    Capture<Map<String, String>> propertiesCapture = EasyMock.newCapture();
+
+    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
+    expect(controller.getClusters()).andReturn(clusters).anyTimes();
+    expect(controller.createConfig(anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+      EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
+
+    replay(controller, injector2);
+    new UpgradeCatalog250(injector2).updateRangerUrlConfigs();
+    easyMockSupport.verifyAll();
+
+    Map<String, String> updatedProperties = propertiesCapture.getValue();
+    assertTrue(Maps.difference(newProperties, updatedProperties).areEqual());
+  }
 }

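Each case in the test above follows the same shape: a ranger-*-security config holding the "{{policymgr_mgr_url}}" template is resolved against the policymgr_external_url from admin-properties. A self-contained Java sketch of that substitution — the method names are illustrative, not UpgradeCatalog250's internals:

import java.util.HashMap;
import java.util.Map;

public class RangerUrlUpdateSketch {
  /** Replaces the {{policymgr_mgr_url}} placeholder with the concrete admin URL. */
  static Map<String, String> updateRangerUrls(Map<String, String> security,
                                              Map<String, String> adminProperties) {
    String externalUrl = adminProperties.get("policymgr_external_url");
    Map<String, String> updated = new HashMap<>(security);
    updated.replaceAll((key, value) ->
        key.endsWith(".policy.rest.url") && "{{policymgr_mgr_url}}".equals(value)
            ? externalUrl : value);
    return updated;
  }

  public static void main(String[] args) {
    Map<String, String> security = new HashMap<>();
    security.put("ranger.plugin.hdfs.policy.rest.url", "{{policymgr_mgr_url}}");
    Map<String, String> admin = Map.of("policymgr_external_url", "http://localhost:6080");
    System.out.println(updateRangerUrls(security, admin));
    // {ranger.plugin.hdfs.policy.rest.url=http://localhost:6080}
  }
}
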
http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json b/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
index 99d2251..ea00a37 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
@@ -380,10 +380,10 @@
             "hive.optimize.mapjoin.mapreduce": "true"
         }, 
         "ranger-hive-plugin-properties": {
-            "ranger-hive-plugin-enabled":"yes"
+            "ranger-hive-plugin-enabled":"No"
         },
         "ranger-knox-plugin-properties": {
-            "ranger-knox-plugin-enabled":"yes"
+            "ranger-knox-plugin-enabled":"No"
         },
         "yarn-site": {
             "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", 
@@ -626,7 +626,7 @@
             "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
             "XAAUDIT.SOLR.IS_ENABLED": "false", 
             "hadoop.rpc.protection": "-", 
-            "ranger-hdfs-plugin-enabled": "Yes", 
+            "ranger-hdfs-plugin-enabled": "No", 
             "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
             "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
             "policy_user": "ambari-qa", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index 849b737..2a27eca 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -534,6 +534,9 @@
             "yarn.http.policy": "HTTP_ONLY",
             "yarn.resourcemanager.webapp.https.address": "c6402.ambari.apache.org:8090"
         },
+        "ranger-yarn-plugin-properties": {
+            "ranger-yarn-plugin-enabled": "No"
+        },
         "tez-site": {
             "tez.am.log.level": "WARN",
             "tez.lib.uris": "hdfs:///apps/tez/,hdfs:///apps/tez/lib/",
@@ -582,7 +585,8 @@
             "hive_log_dir": "/var/log/hive",
             "hive_user": "hive",
             "hcat_log_dir": "/var/log/webhcat",
-            "hive_database": "New MySQL Database"
+            "hive_database": "New MySQL Database",
+            "hive_security_authorization": "None"
         },
         "ranger-env": {
             "xml_configurations_supported" : "false"
@@ -750,7 +754,7 @@
             "XAAUDIT.HDFS.IS_ENABLED": "false",
             "SQL_CONNECTOR_JAR": "{{sql_connector_jar}}",
             "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
-            "ranger-hbase-plugin-enabled": "Yes",
+            "ranger-hbase-plugin-enabled": "No",
             "REPOSITORY_NAME": "{{repo_name}}",
             "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
             "XAAUDIT.DB.IS_ENABLED": "true",
@@ -791,7 +795,7 @@
             "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
             "XAAUDIT.SOLR.IS_ENABLED": "false",
             "hadoop.rpc.protection": "-",
-            "ranger-hdfs-plugin-enabled": "Yes",
+            "ranger-hdfs-plugin-enabled": "No",
             "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
             "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
             "policy_user": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
index 5659ba6..8c17e86 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
@@ -532,7 +532,8 @@
             "hive_log_dir": "/var/log/hive", 
             "hive_user": "hive", 
             "hcat_log_dir": "/var/log/webhcat", 
-            "hive_database": "New MySQL Database"
+            "hive_database": "New MySQL Database",
+            "hive_security_authorization": "None"
         },
         "hbase-env": {
             "hbase_pid_dir": "/var/run/hbase", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
index 2b92cca..009ff6d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
@@ -317,7 +317,8 @@
             "hive_log_dir": "/var/log/hive", 
             "hive_user": "hive", 
             "hcat_log_dir": "/var/log/webhcat", 
-            "hive_database": "New MySQL Database"
+            "hive_database": "New MySQL Database",
+            "hive_security_authorization": "None"
         },
       "cluster-env": {
         "managed_hdfs_resource_property_names": "",

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
index acac36f..2b078c3 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
@@ -319,7 +319,8 @@
             "hive_log_dir": "/var/log/hive", 
             "hive_user": "hive", 
             "hcat_log_dir": "/var/log/webhcat", 
-            "hive_database": "New MySQL Database"
+            "hive_database": "New MySQL Database",
+            "hive_security_authorization": "None"
         },
       "cluster-env": {
         "managed_hdfs_resource_property_names": "",

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
index a02a874..571b737 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
@@ -569,7 +569,8 @@
             "hive_log_dir": "/var/log/hive", 
             "hive_user": "hive", 
             "hcat_log_dir": "/var/log/webhcat", 
-            "hive_database": "New MySQL Database"
+            "hive_database": "New MySQL Database",
+            "hive_security_authorization": "None"
         },
         "ranger-env": {
             "xml_configurations_supported" : "false"

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
index 73c49a1..7fdb449 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
@@ -542,7 +542,8 @@
             "hive_log_dir": "/var/log/hive", 
             "hive_user": "hive", 
             "hcat_log_dir": "/var/log/webhcat", 
-            "hive_database": "New MySQL Database"
+            "hive_database": "New MySQL Database",
+            "hive_security_authorization": "None"
         },
       "cluster-env": {
         "managed_hdfs_resource_property_names": "",

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.0.6/configs/default_with_bucket.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_with_bucket.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_with_bucket.json
index a0e7e9d..5080d30 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_with_bucket.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_with_bucket.json
@@ -742,7 +742,7 @@
             "XAAUDIT.HDFS.IS_ENABLED": "false",
             "SQL_CONNECTOR_JAR": "{{sql_connector_jar}}",
             "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
-            "ranger-hbase-plugin-enabled": "Yes",
+            "ranger-hbase-plugin-enabled": "No",
             "REPOSITORY_NAME": "{{repo_name}}",
             "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
             "XAAUDIT.DB.IS_ENABLED": "true",
@@ -783,7 +783,7 @@
             "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits",
             "XAAUDIT.SOLR.IS_ENABLED": "false",
             "hadoop.rpc.protection": "-",
-            "ranger-hdfs-plugin-enabled": "Yes",
+            "ranger-hdfs-plugin-enabled": "No",
             "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
             "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
             "policy_user": "ambari-qa",

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
index 0e666ba..841dfda 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
@@ -506,7 +506,7 @@
             "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
             "XAAUDIT.SOLR.IS_ENABLED": "false", 
             "hadoop.rpc.protection": "-", 
-            "ranger-hdfs-plugin-enabled": "Yes", 
+            "ranger-hdfs-plugin-enabled": "No", 
             "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
             "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
             "policy_user": "ambari-qa", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
index baec1fa..96f4d9d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
@@ -506,7 +506,7 @@
             "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
             "XAAUDIT.SOLR.IS_ENABLED": "false", 
             "hadoop.rpc.protection": "-", 
-            "ranger-hdfs-plugin-enabled": "Yes", 
+            "ranger-hdfs-plugin-enabled": "No", 
             "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
             "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
             "policy_user": "ambari-qa", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
index 61b9fe0..de2742f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
@@ -507,7 +507,7 @@
             "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
             "XAAUDIT.SOLR.IS_ENABLED": "false", 
             "hadoop.rpc.protection": "-", 
-            "ranger-hdfs-plugin-enabled": "Yes", 
+            "ranger-hdfs-plugin-enabled": "No", 
             "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
             "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
             "policy_user": "ambari-qa", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json
index 6b57397..ba0fa8f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json
@@ -507,7 +507,7 @@
             "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
             "XAAUDIT.SOLR.IS_ENABLED": "false", 
             "hadoop.rpc.protection": "-", 
-            "ranger-hdfs-plugin-enabled": "Yes", 
+            "ranger-hdfs-plugin-enabled": "No", 
             "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
             "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
             "policy_user": "ambari-qa", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
index 1cdb982..888886e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
@@ -234,7 +234,7 @@
             "hadoop.security.auth_to_local": "\n        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n        RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT"
         }, 
         "ranger-hdfs-plugin-properties" : {
-            "ranger-hdfs-plugin-enabled":"yes"
+            "ranger-hdfs-plugin-enabled":"No"
         },
         "hdfs-log4j": {
             "log4j.appender.DRFA.layout": "org.apache.log4j.PatternLayout", 
@@ -508,7 +508,7 @@
             "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
             "XAAUDIT.SOLR.IS_ENABLED": "false", 
             "hadoop.rpc.protection": "-", 
-            "ranger-hdfs-plugin-enabled": "Yes", 
+            "ranger-hdfs-plugin-enabled": "No", 
             "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
             "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
             "policy_user": "ambari-qa", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
index 15902af..f06fae3 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
@@ -526,7 +526,7 @@
             "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
             "XAAUDIT.SOLR.IS_ENABLED": "false", 
             "hadoop.rpc.protection": "-", 
-            "ranger-hdfs-plugin-enabled": "Yes", 
+            "ranger-hdfs-plugin-enabled": "No", 
             "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
             "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
             "policy_user": "ambari-qa", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
index e6a8676..c5ffcc9 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
@@ -538,7 +538,7 @@
             "hbase_java_io_tmpdir" : "/tmp"
         }, 
         "ranger-hbase-plugin-properties": {
-            "ranger-hbase-plugin-enabled":"yes"
+            "ranger-hbase-plugin-enabled":"No"
         },        
         "ganglia-env": {
             "gmond_user": "nobody", 
@@ -583,7 +583,7 @@
             "XAAUDIT.HDFS.IS_ENABLED": "false", 
             "SQL_CONNECTOR_JAR": "{{sql_connector_jar}}", 
             "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
-            "ranger-hbase-plugin-enabled": "Yes", 
+            "ranger-hbase-plugin-enabled": "No", 
             "REPOSITORY_NAME": "{{repo_name}}", 
             "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
             "XAAUDIT.DB.IS_ENABLED": "true", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2-phoenix.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2-phoenix.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2-phoenix.json
index b1d603b..114bdff 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2-phoenix.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2-phoenix.json
@@ -539,7 +539,7 @@
             "hbase_java_io_tmpdir" : "/tmp"
         }, 
         "ranger-hbase-plugin-properties": {
-            "ranger-hbase-plugin-enabled":"yes"
+            "ranger-hbase-plugin-enabled":"No"
         },
         "ganglia-env": {
             "gmond_user": "nobody", 
@@ -584,7 +584,7 @@
             "XAAUDIT.HDFS.IS_ENABLED": "false", 
             "SQL_CONNECTOR_JAR": "{{sql_connector_jar}}", 
             "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
-            "ranger-hbase-plugin-enabled": "Yes", 
+            "ranger-hbase-plugin-enabled": "No", 
             "REPOSITORY_NAME": "{{repo_name}}", 
             "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
             "XAAUDIT.DB.IS_ENABLED": "true", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
index 435291a..d82ca99 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
@@ -538,7 +538,7 @@
             "hbase_java_io_tmpdir" : "/tmp"
         }, 
         "ranger-hbase-plugin-properties": {
-            "ranger-hbase-plugin-enabled":"yes"
+            "ranger-hbase-plugin-enabled":"No"
         },
         "ganglia-env": {
             "gmond_user": "nobody", 
@@ -583,7 +583,7 @@
             "XAAUDIT.HDFS.IS_ENABLED": "false", 
             "SQL_CONNECTOR_JAR": "{{sql_connector_jar}}", 
             "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
-            "ranger-hbase-plugin-enabled": "Yes", 
+            "ranger-hbase-plugin-enabled": "No", 
             "REPOSITORY_NAME": "{{repo_name}}", 
             "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
             "XAAUDIT.DB.IS_ENABLED": "true", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
index 9f0c236..f4b8a70 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
@@ -183,7 +183,7 @@
             "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
             "XAAUDIT.SOLR.IS_ENABLED": "false", 
             "hadoop.rpc.protection": "-", 
-            "ranger-hdfs-plugin-enabled": "Yes", 
+            "ranger-hdfs-plugin-enabled": "No", 
             "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
             "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
             "policy_user": "ambari-qa", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
index 3367e1b..5327865 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
@@ -387,7 +387,7 @@
             "ipc.client.connection.maxidletime": "30000"
         }, 
         "ranger-hdfs-plugin-properties" : {
-            "ranger-hdfs-plugin-enabled":"yes"
+            "ranger-hdfs-plugin-enabled":"No"
         },
 		"ranger-hive-plugin-properties": {
             "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
@@ -592,6 +592,9 @@
             "yarn.http.policy": "HTTP_ONLY",
             "yarn.resourcemanager.webapp.https.address": "c6402.ambari.apache.org:8090"
         },
+        "ranger-yarn-plugin-properties" : {
+            "ranger-yarn-plugin-enabled":"No"
+        },
         "yarn-env": {
             "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 
             "apptimelineserver_heapsize": "1024", 
@@ -649,7 +652,8 @@
             "hive_log_dir": "/var/log/hive", 
             "hive_user": "hive", 
             "hcat_log_dir": "/var/log/webhcat", 
-            "hive_database": "New MySQL Database"
+            "hive_database": "New MySQL Database",
+            "hive_security_authorization": "None"
         },
         "hbase-env": {
             "hbase_pid_dir": "/var/run/hbase", 
@@ -771,7 +775,7 @@
             "XAAUDIT.HDFS.IS_ENABLED": "false", 
             "SQL_CONNECTOR_JAR": "{{sql_connector_jar}}", 
             "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
-            "ranger-hbase-plugin-enabled": "Yes", 
+            "ranger-hbase-plugin-enabled": "No", 
             "REPOSITORY_NAME": "{{repo_name}}", 
             "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
             "XAAUDIT.DB.IS_ENABLED": "true", 
@@ -812,7 +816,7 @@
             "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
             "XAAUDIT.SOLR.IS_ENABLED": "false", 
             "hadoop.rpc.protection": "-", 
-            "ranger-hdfs-plugin-enabled": "Yes", 
+            "ranger-hdfs-plugin-enabled": "No", 
             "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
             "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
             "policy_user": "ambari-qa", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
index 699c1f5..bf4ff12 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
@@ -585,7 +585,8 @@
             "hive_log_dir": "/var/log/hive", 
             "hive_user": "hive", 
             "hcat_log_dir": "/var/log/webhcat", 
-            "hive_database": "New MySQL Database"
+            "hive_database": "New MySQL Database",
+            "hive_security_authorization": "None"
         },
         "hbase-env": {
             "hbase_pid_dir": "/var/run/hbase", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json b/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
index 92a7516..27cb63e 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
@@ -235,7 +235,7 @@
             "XAAUDIT.DB.DATABASE_NAME": "{{xa_audit_db_name}}", 
             "XAAUDIT.DB.HOSTNAME": "{{xa_db_host}}", 
             "XAAUDIT.SOLR.IS_ENABLED": "false", 
-            "ranger-storm-plugin-enabled": "Yes", 
+            "ranger-storm-plugin-enabled": "No", 
             "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
             "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
             "XAAUDIT.DB.USER_NAME": "{{xa_audit_db_user}}", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.1/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/default.json b/ambari-server/src/test/python/stacks/2.1/configs/default.json
index 6ee7612..e04e1eb 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/default.json
@@ -254,7 +254,7 @@
             "XAAUDIT.DB.DATABASE_NAME": "{{xa_audit_db_name}}", 
             "XAAUDIT.DB.HOSTNAME": "{{xa_db_host}}", 
             "XAAUDIT.SOLR.IS_ENABLED": "false", 
-            "ranger-storm-plugin-enabled": "Yes", 
+            "ranger-storm-plugin-enabled": "No", 
             "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
             "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
             "XAAUDIT.DB.USER_NAME": "{{xa_audit_db_user}}", 
@@ -649,7 +649,8 @@
             "hive_log_dir": "/var/log/hive", 
             "hive_user": "hive", 
             "hcat_log_dir": "/var/log/webhcat", 
-            "hive_database": "New MySQL Database"
+            "hive_database": "New MySQL Database",
+            "hive_security_authorization": "None"
         },
         "hbase-env": {
             "hbase_pid_dir": "/var/run/hbase", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json b/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
index 9bd239c..1b027b7 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
@@ -246,7 +246,7 @@
             "XAAUDIT.DB.DATABASE_NAME": "{{xa_audit_db_name}}", 
             "XAAUDIT.DB.HOSTNAME": "{{xa_db_host}}", 
             "XAAUDIT.SOLR.IS_ENABLED": "false", 
-            "ranger-storm-plugin-enabled": "Yes", 
+            "ranger-storm-plugin-enabled": "No", 
             "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
             "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
             "XAAUDIT.DB.USER_NAME": "{{xa_audit_db_user}}", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.1/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/secured.json b/ambari-server/src/test/python/stacks/2.1/configs/secured.json
index 0e4bfc3..61b359c 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/secured.json
@@ -102,7 +102,7 @@
             "XAAUDIT.DB.DATABASE_NAME": "{{xa_audit_db_name}}", 
             "XAAUDIT.DB.HOSTNAME": "{{xa_db_host}}", 
             "XAAUDIT.SOLR.IS_ENABLED": "false", 
-            "ranger-storm-plugin-enabled": "Yes", 
+            "ranger-storm-plugin-enabled": "No", 
             "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
             "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
             "XAAUDIT.DB.USER_NAME": "{{xa_audit_db_user}}", 
@@ -640,7 +640,8 @@
             "hive_log_dir": "/var/log/hive", 
             "hive_user": "hive", 
             "hcat_log_dir": "/var/log/webhcat", 
-            "hive_database": "New MySQL Database"
+            "hive_database": "New MySQL Database",
+            "hive_security_authorization": "None"
         },
         "hbase-env": {
             "hbase_pid_dir": "/var/run/hbase", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index de8f2b0..8a82c99 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -3656,7 +3656,18 @@ class TestHDP22StackAdvisor(TestCase):
       }
     ]
 
-    res = self.stackAdvisor.validateHiveConfigurationsEnv(properties, {}, configurations, {}, {})
+    services = {
+      "services":
+      [
+        {
+          "StackServices": {
+           "service_name" : "RANGER"
+          }
+        }
+      ]
+    }
+
+    res = self.stackAdvisor.validateHiveConfigurationsEnv(properties, {}, configurations, services, {})
     self.assertEquals(res, res_expected)
 
     # 2) fail: hive_security_authorization=Ranger but ranger plugin is disabled in ranger-env
@@ -3674,6 +3685,14 @@ class TestHDP22StackAdvisor(TestCase):
       }
     }
     services = {
+      "services":
+      [
+        {
+          "StackServices": {
+           "service_name" : "RANGER"
+          }
+        }
+      ],
       "configurations": configurations
     }
     res_expected = []
@@ -3946,6 +3965,14 @@ class TestHDP22StackAdvisor(TestCase):
       }
     }
     services = {
+      "services":
+      [
+        {
+          "StackServices": {
+           "service_name" : "RANGER"
+          }
+        }
+      ],
       "configurations": configurations
     }
     res_expected = []
@@ -3982,6 +4009,14 @@ class TestHDP22StackAdvisor(TestCase):
       }
     }
     services = {
+      "services":
+      [
+        {
+          "StackServices": {
+           "service_name" : "RANGER"
+          }
+        }
+      ],
       "configurations": configurations
     }
     res_expected = []
@@ -4018,6 +4053,14 @@ class TestHDP22StackAdvisor(TestCase):
       }
     }
     services = {
+      "services":
+      [
+        {
+          "StackServices": {
+           "service_name" : "RANGER"
+          }
+        }
+      ],
       "configurations": configurations
     }
     res_expected = []
@@ -4054,6 +4097,14 @@ class TestHDP22StackAdvisor(TestCase):
       }
     }
     services = {
+      "services":
+      [
+        {
+          "StackServices": {
+           "service_name" : "RANGER"
+          }
+        }
+      ],
       "configurations": configurations
     }
     res_expected = []
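
The recurring fixture change above (a "services" list carrying a RANGER entry under "StackServices") reflects that validateHiveConfigurationsEnv now consults the services payload, so tests that previously passed an empty dict must declare RANGER explicitly. The advisor itself is Python; the sketch below transposes the membership check to Java for consistency with the rest of this digest, and every name in it (ServicePresence, hasService) is invented for illustration:

import java.util.Collections;
import java.util.List;
import java.util.Map;

// Illustrative only: walk the "services" payload and test whether a
// given service_name is declared, mirroring the JSON shape above.
public class ServicePresence {

  static boolean hasService(List<Map<String, Map<String, String>>> services, String wanted) {
    for (Map<String, Map<String, String>> entry : services) {
      Map<String, String> stackServices = entry.get("StackServices");
      if (stackServices != null && wanted.equals(stackServices.get("service_name"))) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    Map<String, String> ranger = Collections.singletonMap("service_name", "RANGER");
    List<Map<String, Map<String, String>>> services =
        Collections.singletonList(Collections.singletonMap("StackServices", ranger));
    System.out.println(hasService(services, "RANGER"));  // prints: true
  }
}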

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.2/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/default.json b/ambari-server/src/test/python/stacks/2.2/configs/default.json
index 7583e27..bcb021b 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/default.json
@@ -211,7 +211,7 @@
             "XAAUDIT.DB.HOSTNAME": "{{xa_db_host}}", 
             "XAAUDIT.SOLR.IS_ENABLED": "false", 
             "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
-            "ranger-knox-plugin-enabled": "Yes", 
+            "ranger-knox-plugin-enabled": "No", 
             "XAAUDIT.DB.USER_NAME": "{{xa_audit_db_user}}", 
             "policy_user": "ambari-qa", 
             "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
@@ -333,10 +333,10 @@
         "log.retention.hours": "168"
       },
       "ranger-hbase-plugin-properties": {
-            "ranger-hbase-plugin-enabled":"yes"
+            "ranger-hbase-plugin-enabled":"No"
       },
       "ranger-hive-plugin-properties": {
-            "ranger-hive-plugin-enabled":"yes"
+            "ranger-hive-plugin-enabled":"No"
        },
         "accumulo-env": {
             "accumulo_user": "accumulo",

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
index cb476d3..7a29ea0 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
@@ -500,7 +500,8 @@
             "hive_log_dir": "/var/log/hive",
             "hive_user": "hive",
             "hcat_log_dir": "/var/log/webhcat",
-            "hive_database": "New MySQL Database"
+            "hive_database": "New MySQL Database",
+            "hive_security_authorization": "None"
         },
         "webhcat-site": {
             "templeton.pig.path": "pig.tar.gz/pig/bin/pig",

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
index 295aee8..2149877 100644
--- a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
@@ -1557,7 +1557,8 @@ class TestHDP23StackAdvisor(TestCase):
         'properties': {
           'ranger-storm-plugin-enabled': 'No',
         }
-      }
+      },
+      'ranger-knox-security': {'properties': {}}
     }
 
     recommendedConfigurations = {}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json b/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json
index ffdd5e9..6531750 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json
@@ -618,7 +618,8 @@
             "hive_log_dir": "/var/log/hive",
             "hive_user": "hive",
             "hcat_log_dir": "/var/log/webhcat",
-            "hive_database": "New MySQL Database"
+            "hive_database": "New MySQL Database",
+            "hive_security_authorization": "None"
         },
         "ranger-env": {
             "xml_configurations_supported" : "false"

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/test/python/stacks/2.5/configs/hsi_ha.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/hsi_ha.json b/ambari-server/src/test/python/stacks/2.5/configs/hsi_ha.json
index def0e54..10d1d99 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/hsi_ha.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/hsi_ha.json
@@ -617,7 +617,8 @@
             "hive_log_dir": "/var/log/hive",
             "hive_user": "hive",
             "hcat_log_dir": "/var/log/webhcat",
-            "hive_database": "New MySQL Database"
+            "hive_database": "New MySQL Database",
+            "hive_security_authorization": "None"
         },
         "ranger-env": {
             "xml_configurations_supported" : "false"

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-web/app/controllers/main/service/info/configs.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/info/configs.js b/ambari-web/app/controllers/main/service/info/configs.js
index 60a0e02..d946ea0 100644
--- a/ambari-web/app/controllers/main/service/info/configs.js
+++ b/ambari-web/app/controllers/main/service/info/configs.js
@@ -514,12 +514,12 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.AddSecurityConfi
     var selectedService = this.get('stepConfigs').findProperty('serviceName', this.get('content.serviceName'));
     this.set('selectedService', selectedService);
     this.checkOverrideProperty(selectedService);
-    if (App.Service.find().someProperty('serviceName', 'RANGER')) {
+    /* if (App.Service.find().someProperty('serviceName', 'RANGER')) {
       App.router.get('mainServiceInfoSummaryController').updateRangerPluginsStatus();
       this.setVisibilityForRangerProperties(selectedService);
     } else {
       App.config.removeRangerConfigs(this.get('stepConfigs'));
-    }
+    } */
     this.loadConfigRecommendations(null, this._onLoadComplete.bind(this));
     App.loadTimer.finish('Service Configs Page');
   },

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-web/app/controllers/wizard/step7_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step7_controller.js b/ambari-web/app/controllers/wizard/step7_controller.js
index 2d83bca..61fd910 100644
--- a/ambari-web/app/controllers/wizard/step7_controller.js
+++ b/ambari-web/app/controllers/wizard/step7_controller.js
@@ -535,10 +535,10 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, App.E
     this.set('stepConfigs', serviceConfigs);
     this.checkHostOverrideInstaller();
     this.selectProperService();
-    var rangerService = App.StackService.find().findProperty('serviceName', 'RANGER');
+    /* var rangerService = App.StackService.find().findProperty('serviceName', 'RANGER');
     if (rangerService && !rangerService.get('isInstalled') && !rangerService.get('isSelected')) {
       App.config.removeRangerConfigs(this.get('stepConfigs'));
-    }
+    } */
     console.timeEnd('applyServicesConfigs execution time: ');
     console.time('loadConfigRecommendations execution time: ');
     this.loadConfigRecommendations(null, this.completeConfigLoading.bind(this));
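
Both ambari-web edits above disable, rather than delete, the guard that stripped Ranger plugin configs whenever the RANGER service was neither installed nor selected; with the guard commented out, plugin-side properties stay visible on clusters without Ranger admin, in line with the Ranger-plugin-decoupling theme of this series. A sketch of the guard condition that was switched off, transposed from JavaScript to Java (RangerConfigGuard and shouldRemoveRangerConfigs are invented names):

// Illustrative transliteration of the commented-out JavaScript guard:
// configs were removed only when Ranger was neither installed nor selected.
public class RangerConfigGuard {

  static boolean shouldRemoveRangerConfigs(boolean rangerInstalled, boolean rangerSelected) {
    return !rangerInstalled && !rangerSelected;
  }

  public static void main(String[] args) {
    System.out.println(shouldRemoveRangerConfigs(false, false));  // prints: true
  }
}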


[06/50] [abbrv] ambari git commit: AMBARI-19561. After an Ambari-only upgrade, the property "yarn.nodemanager.linux-container-executor.cgroups.mount-path" becomes required (dgrinenko via dlysnichenko)

Posted by nc...@apache.org.
AMBARI-19561. After an Ambari-only upgrade, the property "yarn.nodemanager.linux-container-executor.cgroups.mount-path" becomes required (dgrinenko via dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b7d8f5e9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b7d8f5e9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b7d8f5e9

Branch: refs/heads/branch-dev-patch-upgrade
Commit: b7d8f5e9767e94516a6990ac09bbbfb6ddece2fd
Parents: 112cea4
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Tue Jan 17 12:06:41 2017 +0200
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Tue Jan 17 12:06:41 2017 +0200

----------------------------------------------------------------------
 .../server/upgrade/UpgradeCatalog250.java       |  39 +++++++
 .../YARN/2.1.0.2.0/kerberos.json                |   1 -
 .../YARN/3.0.0.3.0/kerberos.json                |   1 -
 .../stacks/HDP/2.2/services/YARN/kerberos.json  |   1 -
 .../HDP/2.3.ECS/services/YARN/kerberos.json     |   1 -
 .../stacks/HDP/2.3/services/YARN/kerberos.json  |   1 -
 .../stacks/HDP/2.5/services/YARN/kerberos.json  |   1 -
 .../stacks/PERF/1.0/services/YARN/kerberos.json |   1 -
 .../server/upgrade/UpgradeCatalog250Test.java   | 106 +++++++++++++++++++
 .../2.2/configs/pig-service-check-secure.json   |   3 +-
 .../test_kerberos_descriptor_2_1_3.json         |   1 -
 .../data/stacks/HDP-2.1/service_components.json |   1 -
 12 files changed, 146 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b7d8f5e9/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
index 3d84968..29e1f17 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
@@ -64,6 +64,10 @@ public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
   private static final String HBASE_ROOTDIR = "hbase.rootdir";
   private static final String HADOOP_ENV = "hadoop-env";
   private static final String KAFKA_BROKER = "kafka-broker";
+  private static final String YARN_SITE_CONFIG = "yarn-site";
+  private static final String YARN_ENV_CONFIG = "yarn-env";
+  private static final String YARN_LCE_CGROUPS_MOUNT_PATH = "yarn.nodemanager.linux-container-executor.cgroups.mount-path";
+  private static final String YARN_CGROUPS_ENABLED = "yarn_cgroups_enabled";
   private static final String KAFKA_TIMELINE_METRICS_HOST = "kafka.timeline.metrics.host";
 
   public static final String COMPONENT_TABLE = "servicecomponentdesiredstate";
@@ -161,6 +165,7 @@ public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
     updateAtlasConfigs();
     updateLogSearchConfigs();
     updateAmbariInfraConfigs();
+    updateYarnSite();
     addManageServiceAutoStartPermissions();
   }
 
@@ -179,6 +184,40 @@ public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
     dbAccessor.addUniqueConstraint(GROUPS_TABLE, "UNQ_groups_0", "group_name", "group_type");
   }
 
+  /**
+   * Updates {@code yarn-site} in the following way:
+   *
+   * Removes {@code YARN_LCE_CGROUPS_MOUNT_PATH} if {@code YARN_CGROUPS_ENABLED} is {@code false} and
+   * {@code YARN_LCE_CGROUPS_MOUNT_PATH} is an empty string.
+   *
+   * @throws AmbariException if the configuration cannot be updated
+   */
+  protected void updateYarnSite() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    Clusters clusters = ambariManagementController.getClusters();
+    Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
+
+    for (final Cluster cluster : clusterMap.values()) {
+      Config yarnEnvConfig = cluster.getDesiredConfigByType(YARN_ENV_CONFIG);
+      Config yarnSiteConfig = cluster.getDesiredConfigByType(YARN_SITE_CONFIG);
+
+      if (yarnEnvConfig != null && yarnSiteConfig != null) {
+        String cgroupEnabled = yarnEnvConfig.getProperties().get(YARN_CGROUPS_ENABLED);
+        String mountPath = yarnSiteConfig.getProperties().get(YARN_LCE_CGROUPS_MOUNT_PATH);
+
+        if (StringUtils.isEmpty(mountPath) && cgroupEnabled != null
+          && cgroupEnabled.trim().equalsIgnoreCase("false")){
+
+          removeConfigurationPropertiesFromCluster(cluster, YARN_SITE_CONFIG, new HashSet<String>(){{
+            add(YARN_LCE_CGROUPS_MOUNT_PATH);
+          }});
+
+        }
+      }
+
+    }
+  }
+
   protected void updateHiveLlapConfigs() throws AmbariException {
     AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
     Clusters clusters = ambariManagementController.getClusters();
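
Read together with the Javadoc, the removal in updateYarnSite() fires only when both conditions hold: cgroups are disabled in yarn-env and the mount-path value in yarn-site is empty. Below is a self-contained sketch of that decision, with the Config objects simplified to plain maps (YarnSiteCleanup and shouldRemoveMountPath are invented names; the real code uses StringUtils.isEmpty for the empty check):

import java.util.HashMap;
import java.util.Map;

// Standalone illustration of the updateYarnSite() decision above.
public class YarnSiteCleanup {

  static boolean shouldRemoveMountPath(Map<String, String> yarnEnv, Map<String, String> yarnSite) {
    String cgroupsEnabled = yarnEnv.get("yarn_cgroups_enabled");
    String mountPath = yarnSite.get("yarn.nodemanager.linux-container-executor.cgroups.mount-path");
    boolean mountPathEmpty = mountPath == null || mountPath.isEmpty();
    return mountPathEmpty && cgroupsEnabled != null && cgroupsEnabled.trim().equalsIgnoreCase("false");
  }

  public static void main(String[] args) {
    Map<String, String> env = new HashMap<>();
    env.put("yarn_cgroups_enabled", "false");
    Map<String, String> site = new HashMap<>();
    site.put("yarn.nodemanager.linux-container-executor.cgroups.mount-path", "");
    System.out.println(shouldRemoveMountPath(env, site));  // prints: true, so the property is removed
  }
}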

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7d8f5e9/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
index c307800..6b61c13 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
@@ -31,7 +31,6 @@
             "yarn.resourcemanager.proxyusers.*.hosts": "",
             "yarn.resourcemanager.proxyusers.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": "",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",
             "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7d8f5e9/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
index af920f1..d334887 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
@@ -32,7 +32,6 @@
             "yarn.resourcemanager.proxyusers.*.hosts": "",
             "yarn.resourcemanager.proxyusers.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": "",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",
             "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7d8f5e9/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
index 3a183cc..ad30b76 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
@@ -31,7 +31,6 @@
             "yarn.resourcemanager.proxyusers.*.hosts": "",
             "yarn.resourcemanager.proxyusers.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": "",
             "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore-secure",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7d8f5e9/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
index e11ce84..7977941 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
@@ -34,7 +34,6 @@
             "yarn.resourcemanager.proxyusers.*.hosts": "",
             "yarn.resourcemanager.proxyusers.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": "",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",
             "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7d8f5e9/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
index 1a6cf5b..73addb1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
@@ -32,7 +32,6 @@
             "yarn.resourcemanager.proxyusers.*.hosts": "",
             "yarn.resourcemanager.proxyusers.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": "",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",
             "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7d8f5e9/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
index af920f1..d334887 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
@@ -32,7 +32,6 @@
             "yarn.resourcemanager.proxyusers.*.hosts": "",
             "yarn.resourcemanager.proxyusers.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": "",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",
             "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7d8f5e9/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json
index f33c07d..7e74237 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json
@@ -32,7 +32,6 @@
             "yarn.resourcemanager.proxyusers.*.hosts": "",
             "yarn.resourcemanager.proxyusers.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": "",
             "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore-secure"
           }
         },

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7d8f5e9/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
index 093de4b..1830f24 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
@@ -22,6 +22,7 @@ import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.anyString;
 import static org.easymock.EasyMock.capture;
 import static org.easymock.EasyMock.createMockBuilder;
+import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.expectLastCall;
@@ -39,8 +40,10 @@ import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import javax.persistence.EntityManager;
 
@@ -84,6 +87,8 @@ import com.google.inject.Injector;
 import com.google.inject.Module;
 import com.google.inject.Provider;
 
+import junit.framework.AssertionFailedError;
+
 /**
  * {@link UpgradeCatalog250} unit tests.
  */
@@ -339,6 +344,107 @@ public class UpgradeCatalog250Test {
   }
 
   @Test
+  public void testUpdateYarnSite() throws Exception{
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+
+    final String propertyToRemove = "yarn.nodemanager.linux-container-executor.cgroups.mount-path";
+    final AmbariManagementController ambariManagementController = createNiceMock(AmbariManagementController.class);
+    Config mockYarnEnv = easyMockSupport.createNiceMock(Config.class);
+    Config mockYarnSite = easyMockSupport.createNiceMock(Config.class);
+
+    HashMap<String, String> yarnEnv = new HashMap<String, String>(){{
+      put("yarn_cgroups_enabled", "false");
+    }};
+
+    HashMap<String, String> yarnSite = new HashMap<String, String>() {{
+      put(propertyToRemove, "");
+    }};
+
+    reset(clusters, cluster, injector);
+
+    expect(injector.getInstance(AmbariManagementController.class)).andReturn(ambariManagementController).atLeastOnce();
+    expect(ambariManagementController.getClusters()).andReturn(clusters).atLeastOnce();
+    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", cluster);
+    }}).once();
+    expect(cluster.getDesiredConfigByType("yarn-env")).andReturn(mockYarnEnv).atLeastOnce();
+    expect(mockYarnEnv.getProperties()).andReturn(yarnEnv).anyTimes();
+    expect(cluster.getDesiredConfigByType("yarn-site")).andReturn(mockYarnSite).atLeastOnce();
+    expect(mockYarnSite.getProperties()).andReturn(yarnSite).anyTimes();
+
+    replay(clusters, cluster, injector, ambariManagementController, mockYarnEnv, mockYarnSite);
+
+    UpgradeCatalog250 upgradeCatalog250 = createMockBuilder(UpgradeCatalog250.class)
+     .addMockedMethod("removeConfigurationPropertiesFromCluster")
+     .withConstructor(injector)
+     .createNiceMock();
+
+    Capture<HashSet<String>> removeConfigName = EasyMock.newCapture();
+
+    upgradeCatalog250.removeConfigurationPropertiesFromCluster(anyObject(Cluster.class), eq("yarn-site"), capture(removeConfigName));
+    EasyMock.expectLastCall();
+
+    replay(upgradeCatalog250);
+
+    upgradeCatalog250.updateYarnSite();
+
+    easyMockSupport.verifyAll();
+
+    Set<String> updatedProperties = removeConfigName.getValue();
+    assertTrue(updatedProperties.contains(propertyToRemove));
+
+    reset(injector);
+  }
+
+  @Test
+  public void testUpdateYarnSiteWithEnabledCGroups() throws Exception{
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+
+    final String propertyToRemove = "yarn.nodemanager.linux-container-executor.cgroups.mount-path";
+    final AmbariManagementController ambariManagementController = createNiceMock(AmbariManagementController.class);
+    Config mockYarnEnv = easyMockSupport.createNiceMock(Config.class);
+    Config mockYarnSite = easyMockSupport.createNiceMock(Config.class);
+
+    HashMap<String, String> yarnEnv = new HashMap<String, String>(){{
+      put("yarn_cgroups_enabled", "true");
+    }};
+
+    HashMap<String, String> yarnSite = new HashMap<String, String>() {{
+      put(propertyToRemove, "");
+    }};
+
+    reset(clusters, cluster, injector);
+
+    expect(injector.getInstance(AmbariManagementController.class)).andReturn(ambariManagementController).atLeastOnce();
+    expect(ambariManagementController.getClusters()).andReturn(clusters).atLeastOnce();
+    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", cluster);
+    }}).once();
+    expect(cluster.getDesiredConfigByType("yarn-env")).andReturn(mockYarnEnv).atLeastOnce();
+    expect(mockYarnEnv.getProperties()).andReturn(yarnEnv).anyTimes();
+    expect(cluster.getDesiredConfigByType("yarn-site")).andReturn(mockYarnSite).atLeastOnce();
+    expect(mockYarnSite.getProperties()).andReturn(yarnSite).anyTimes();
+
+    replay(clusters, cluster, injector, ambariManagementController, mockYarnEnv, mockYarnSite);
+
+    UpgradeCatalog250 upgradeCatalog250 = createMockBuilder(UpgradeCatalog250.class)
+      .addMockedMethod("removeConfigurationPropertiesFromCluster")
+      .withConstructor(injector)
+      .createNiceMock();
+
+    Capture<HashSet<String>> removeConfigName = EasyMock.newCapture();
+
+    upgradeCatalog250.removeConfigurationPropertiesFromCluster(anyObject(Cluster.class), eq("yarn-site"), capture(removeConfigName));
+    EasyMock.expectLastCall().andThrow(new AssertionFailedError()).anyTimes();
+
+    replay(upgradeCatalog250);
+
+    upgradeCatalog250.updateYarnSite();
+
+    reset(injector);
+  }
+
+  @Test
   public void testAmsEnvUpdateConfigs() throws Exception{
 
     Map<String, String> oldPropertiesAmsEnv = new HashMap<String, String>() {
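
The two new tests above share one pattern worth calling out: a partial mock built with createMockBuilder(...).addMockedMethod("removeConfigurationPropertiesFromCluster"), so only that method is stubbed, plus an EasyMock Capture to grab the property set handed to it; the enabled-cgroups variant makes the stub throw AssertionFailedError to prove the method is never reached. A stripped-down sketch of the capture mechanics follows (the Greeter interface is invented for illustration; the EasyMock calls themselves are the real API):

import static org.easymock.EasyMock.capture;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expectLastCall;
import static org.easymock.EasyMock.newCapture;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;

import org.easymock.Capture;

// Minimal illustration of EasyMock argument capture, as used by
// testUpdateYarnSite() above.
public class CaptureDemo {

  interface Greeter {
    void greet(String name);
  }

  public static void main(String[] args) {
    Greeter mock = createMock(Greeter.class);
    Capture<String> captured = newCapture();  // records the argument at call time
    mock.greet(capture(captured));
    expectLastCall().once();
    replay(mock);

    mock.greet("ambari");                     // exercise the mock
    verify(mock);
    System.out.println(captured.getValue());  // prints: ambari
  }
}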

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7d8f5e9/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json b/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
index 0d0c6f5..f14eb52 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
@@ -470,8 +470,7 @@
             "yarn.resourcemanager.recovery.enabled": "true", 
             "yarn.timeline-service.http-authentication.cookie.domain": "", 
             "yarn.resourcemanager.zk-retry-interval-ms": "1000", 
-            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": "", 
-            "yarn.admin.acl": "", 
+            "yarn.admin.acl": "",
             "yarn.node-labels.fs-store.root-dir": "/system/yarn/node-labels", 
             "yarn.client.nodemanager-connect.retry-interval-ms": "10000", 
             "yarn.resourcemanager.admin.address": "c6402.ambari.apache.org:8141", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7d8f5e9/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json b/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json
index a9e0bcd..bcc5359 100644
--- a/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json
+++ b/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json
@@ -802,7 +802,6 @@
         "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
         "yarn.timeline-service.http-authentication.cookie.path": "",
         "yarn.timeline-service.http-authentication.type": "kerberos",
-        "yarn.nodemanager.linux-container-executor.cgroups.mount-path": "",
         "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
         "yarn.acl.enable": "true",
         "yarn.timeline-service.http-authentication.signer.secret.provider": "",

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7d8f5e9/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json b/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json
index d853f24..147c1c0 100644
--- a/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json
+++ b/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json
@@ -2898,7 +2898,6 @@
                 "yarn.timeline-service.http-authentication.kerberos.name.rules" : "",
                 "yarn.timeline-service.http-authentication.cookie.path" : "",
                 "yarn.timeline-service.http-authentication.type" : "kerberos",
-                "yarn.nodemanager.linux-container-executor.cgroups.mount-path" : "",
                 "yarn.resourcemanager.proxy-user-privileges.enabled" : "true",
                 "yarn.acl.enable" : "true",
                 "yarn.timeline-service.http-authentication.signer.secret.provider" : "",


[42/50] [abbrv] ambari git commit: AMBARI-19044 Install & configure Ranger plugin components independently of Ranger admin components. Fix UT (mugdha)

Posted by nc...@apache.org.
AMBARI-19044 Install & configure Ranger plugin components independently of Ranger admin components. Fix UT (mugdha)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/bc77c9ca
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/bc77c9ca
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/bc77c9ca

Branch: refs/heads/branch-dev-patch-upgrade
Commit: bc77c9ca0fa8843a7e2a592efcfbcff5655dbb90
Parents: 7d2388b
Author: Mugdha Varadkar <mu...@apache.org>
Authored: Wed Jan 18 12:32:45 2017 +0530
Committer: Mugdha Varadkar <mu...@apache.org>
Committed: Wed Jan 18 12:51:22 2017 +0530

----------------------------------------------------------------------
 ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/bc77c9ca/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
index a6ed6ce..7f77d83 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
@@ -219,7 +219,7 @@
             "XAAUDIT.DB.IS_ENABLED": "true", 
             "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
             "hadoop.rpc.protection": "-", 
-            "ranger-hdfs-plugin-enabled": "No", 
+            "ranger-hdfs-plugin-enabled": "Yes",
             "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
             "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
             "policy_user": "ambari-qa", 


[18/50] [abbrv] ambari git commit: AMBARI-19591. Ranger Admin HA Wizard: next button disappears (akovalenko)

Posted by nc...@apache.org.
AMBARI-19591. Ranger Admin HA Wizard: next button disappears (akovalenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e83837e1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e83837e1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e83837e1

Branch: refs/heads/branch-dev-patch-upgrade
Commit: e83837e1092f8eebc3e942a5d7f21e9749c0a12d
Parents: 8371667
Author: Aleksandr Kovalenko <ak...@hortonworks.com>
Authored: Tue Jan 17 19:50:18 2017 +0200
Committer: Aleksandr Kovalenko <ak...@hortonworks.com>
Committed: Tue Jan 17 19:50:18 2017 +0200

----------------------------------------------------------------------
 .../main/admin/highAvailability/rangerAdmin/step1.hbs          | 6 ++++--
 .../main/admin/highAvailability/rangerAdmin/step3.hbs          | 1 -
 2 files changed, 4 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e83837e1/ambari-web/app/templates/main/admin/highAvailability/rangerAdmin/step1.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/highAvailability/rangerAdmin/step1.hbs b/ambari-web/app/templates/main/admin/highAvailability/rangerAdmin/step1.hbs
index ed7098c..a076629 100644
--- a/ambari-web/app/templates/main/admin/highAvailability/rangerAdmin/step1.hbs
+++ b/ambari-web/app/templates/main/admin/highAvailability/rangerAdmin/step1.hbs
@@ -39,6 +39,8 @@ right ownership.  The ASF licenses this file
     </div>
   </div>
 </div>
-<div class="btn-area" class="wizard-footer col-md-12">
-  <button class="btn btn-success pull-right" {{bindAttr disabled="isSubmitDisabled"}} {{action next}}>{{t common.next}} &rarr;</button>
+<div class="wizard-footer col-md-12">
+  <div class="btn-area">
+    <button class="btn btn-success pull-right" {{bindAttr disabled="isSubmitDisabled"}} {{action next}}>{{t common.next}} &rarr;</button>
+  </div>
 </div>

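Editor's note on why this hunk fixes the disappearing button: the old markup gave the outer <div> two class attributes (class="btn-area" class="wizard-footer col-md-12"). Under HTML parsing rules a repeated attribute keeps only its first occurrence, so "wizard-footer col-md-12" was silently dropped and the footer never received the layout styles that keep the Next button in view. Nesting a single-classed btn-area div inside the wizard-footer div applies both sets of styles, mirroring the structure visible in the step3 template below. (The causal link to the missing button is inferred from the markup shown here; the commit message does not spell it out.)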
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83837e1/ambari-web/app/templates/main/admin/highAvailability/rangerAdmin/step3.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/highAvailability/rangerAdmin/step3.hbs b/ambari-web/app/templates/main/admin/highAvailability/rangerAdmin/step3.hbs
index 5342728..ce71f69 100644
--- a/ambari-web/app/templates/main/admin/highAvailability/rangerAdmin/step3.hbs
+++ b/ambari-web/app/templates/main/admin/highAvailability/rangerAdmin/step3.hbs
@@ -62,4 +62,3 @@
     <a class="btn btn-success pull-right" {{action next}}>{{t common.next}} &rarr;</a>
   </div>
 </div>
-


[30/50] [abbrv] ambari git commit: Revert "AMBARI-19337. Ambari has some spelling mistakes in YARN proxyuser properties in many places (Jay SenSharma via smohanty)"

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json.orig
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json.orig b/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json.orig
deleted file mode 100644
index f14eb52..0000000
--- a/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json.orig
+++ /dev/null
@@ -1,651 +0,0 @@
-{
-    "configuration_attributes": {
-        "mapred-site": {}, 
-        "pig-env": {}, 
-        "ranger-hdfs-plugin-properties": {}, 
-        "kerberos-env": {}, 
-        "tez-site": {}, 
-        "hdfs-site": {}, 
-        "tez-env": {}, 
-        "yarn-log4j": {}, 
-        "hadoop-policy": {}, 
-        "hdfs-log4j": {}, 
-        "mapred-env": {}, 
-        "krb5-conf": {}, 
-        "pig-properties": {}, 
-        "core-site": {}, 
-        "yarn-env": {}, 
-        "hadoop-env": {}, 
-        "zookeeper-log4j": {}, 
-        "yarn-site": {}, 
-        "capacity-scheduler": {}, 
-        "zoo.cfg": {}, 
-        "zookeeper-env": {}, 
-        "pig-log4j": {}, 
-        "cluster-env": {}
-    }, 
-    "commandParams": {
-        "command_timeout": "300", 
-        "script": "scripts/service_check.py", 
-        "script_type": "PYTHON", 
-        "service_package_folder": "common-services/PIG/0.12.0.2.0/package", 
-        "hooks_folder": "HDP/2.0.6/hooks"
-    }, 
-    "roleCommand": "SERVICE_CHECK", 
-    "kerberosCommandParams": [], 
-    "clusterName": "c1", 
-    "hostname": "c6403.ambari.apache.org", 
-    "hostLevelParams": {
-        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
-        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
-        "ambari_db_rca_password": "mapred", 
-        "java_home": "/usr/jdk64/jdk1.7.0_67",
-        "java_version": "8",
-        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
-        "jce_name": "UnlimitedJCEPolicyJDK7.zip", 
-        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
-        "stack_version": "2.2", 
-        "stack_name": "HDP", 
-        "db_name": "ambari", 
-        "ambari_db_rca_driver": "org.postgresql.Driver", 
-        "jdk_name": "jdk-7u67-linux-x64.tar.gz", 
-        "ambari_db_rca_username": "mapred", 
-        "db_driver_filename": "mysql-connector-java.jar", 
-        "agentCacheDir": "/var/lib/ambari-agent/cache", 
-        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar"
-    }, 
-    "commandType": "EXECUTION_COMMAND", 
-    "roleParams": {}, 
-    "serviceName": "PIG", 
-    "role": "PIG_SERVICE_CHECK", 
-    "forceRefreshConfigTags": [], 
-    "taskId": 180, 
-    "public_hostname": "c6403.ambari.apache.org", 
-    "configurations": {
-        "mapred-site": {
-            "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020", 
-            "mapreduce.jobhistory.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab", 
-            "mapreduce.reduce.input.buffer.percent": "0.0", 
-            "mapreduce.output.fileoutputformat.compress": "false", 
-            "mapreduce.framework.name": "yarn", 
-            "mapreduce.map.speculative": "false", 
-            "mapreduce.reduce.shuffle.merge.percent": "0.66", 
-            "yarn.app.mapreduce.am.resource.mb": "682", 
-            "mapreduce.map.java.opts": "-Xmx546m", 
-            "mapreduce.cluster.administrators": " hadoop", 
-            "mapreduce.application.classpath": "$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure", 
-            "mapreduce.job.reduce.slowstart.completedmaps": "0.05", 
-            "mapreduce.application.framework.path": "/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework", 
-            "mapreduce.output.fileoutputformat.compress.type": "BLOCK", 
-            "mapreduce.reduce.speculative": "false", 
-            "mapreduce.reduce.java.opts": "-Xmx546m", 
-            "mapreduce.am.max-attempts": "2", 
-            "yarn.app.mapreduce.am.admin-command-opts": "-Dhdp.version=${hdp.version}", 
-            "mapreduce.reduce.log.level": "INFO", 
-            "mapreduce.map.sort.spill.percent": "0.7", 
-            "mapreduce.job.emit-timeline-data": "false", 
-            "mapreduce.task.io.sort.mb": "273", 
-            "mapreduce.task.timeout": "300000", 
-            "mapreduce.map.memory.mb": "682", 
-            "mapreduce.task.io.sort.factor": "100", 
-            "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp", 
-            "mapreduce.reduce.memory.mb": "682", 
-            "mapreduce.jobhistory.principal": "jhs/_HOST@EXAMPLE.COM", 
-            "yarn.app.mapreduce.am.log.level": "INFO", 
-            "mapreduce.map.log.level": "INFO", 
-            "mapreduce.shuffle.port": "13562", 
-            "mapreduce.reduce.shuffle.fetch.retry.timeout-ms": "30000", 
-            "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64", 
-            "mapreduce.map.output.compress": "false", 
-            "yarn.app.mapreduce.am.staging-dir": "/user", 
-            "mapreduce.reduce.shuffle.parallelcopies": "30", 
-            "mapreduce.reduce.shuffle.input.buffer.percent": "0.7", 
-            "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888", 
-            "mapreduce.jobhistory.keytab": "/etc/security/keytabs/jhs.service.keytab", 
-            "mapreduce.jobhistory.done-dir": "/mr-history/done", 
-            "mapreduce.admin.reduce.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", 
-            "mapreduce.reduce.shuffle.fetch.retry.enabled": "1", 
-            "mapreduce.jobhistory.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "yarn.app.mapreduce.am.command-opts": "-Xmx546m -Dhdp.version=${hdp.version}", 
-            "mapreduce.reduce.shuffle.fetch.retry.interval-ms": "1000", 
-            "mapreduce.jobhistory.bind-host": "0.0.0.0", 
-            "mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}"
-        }, 
-        "pig-env": {
-            "content": "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  PIG_OPTS=\"$PIG_OPTS -Dmapreduce.framework.name=yarn\"\nfi"
-        }, 
-        "ranger-hdfs-plugin-properties": {
-            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
-            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
-            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
-            "common.name.for.certificate": "-", 
-            "XAAUDIT.HDFS.IS_ENABLED": "false", 
-            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
-            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
-            "XAAUDIT.DB.IS_ENABLED": "true", 
-            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
-            "hadoop.rpc.protection": "-", 
-            "ranger-hdfs-plugin-enabled": "No", 
-            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
-            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
-            "policy_user": "ambari-qa", 
-            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
-            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
-            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
-            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
-            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
-            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
-            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
-            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
-            "REPOSITORY_CONFIG_PASSWORD": "hadoop"
-        }, 
-        "kerberos-env": {
-            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}\n    ",
-            "realm": "EXAMPLE.COM", 
-            "container_dn": "", 
-            "ldap_url": "", 
-            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5", 
-            "kdc_type": "mit-kdc",
-            "kdc_hosts": "c6401.ambari.apache.org",
-            "admin_server_host": "c6401.ambari.apache.org"
-        },
-        "tez-site": {
-            "tez.task.get-task.sleep.interval-ms.max": "200", 
-            "tez.task.max-events-per-heartbeat": "500", 
-            "tez.task.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC", 
-            "tez.runtime.compress": "true", 
-            "tez.runtime.io.sort.mb": "272", 
-            "tez.runtime.convert.user-payload.to.history-text": "false", 
-            "tez.generate.debug.artifacts": "false", 
-            "tez.am.tez-ui.history-url.template": "__HISTORY_URL_BASE__?viewPath=%2F%23%2Ftez-app%2F__APPLICATION_ID__", 
-            "tez.am.log.level": "INFO", 
-            "tez.counters.max.groups": "1000", 
-            "tez.runtime.unordered.output.buffer.size-mb": "51", 
-            "tez.shuffle-vertex-manager.max-src-fraction": "0.4", 
-            "tez.counters.max": "2000", 
-            "tez.task.resource.memory.mb": "682", 
-            "tez.history.logging.service.class": "org.apache.tez.dag.history.logging.ats.ATSHistoryLoggingService", 
-            "tez.lib.uris": "/hdp/apps/${hdp.version}/tez/tez.tar.gz", 
-            "tez.task.am.heartbeat.counter.interval-ms.max": "4000", 
-            "tez.am.max.app.attempts": "2", 
-            "tez.am.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64", 
-            "tez.am.container.idle.release-timeout-max.millis": "20000", 
-            "tez.use.cluster.hadoop-libs": "false", 
-            "tez.am.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", 
-            "tez.am.container.idle.release-timeout-min.millis": "10000", 
-            "tez.runtime.compress.codec": "org.apache.hadoop.io.compress.SnappyCodec", 
-            "tez.task.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", 
-            "tez.task.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64", 
-            "tez.am.container.reuse.enabled": "true", 
-            "tez.session.am.dag.submit.timeout.secs": "300", 
-            "tez.grouping.min-size": "16777216", 
-            "tez.grouping.max-size": "1073741824", 
-            "tez.session.client.timeout.secs": "-1", 
-            "tez.cluster.additional.classpath.prefix": "/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure", 
-            "tez.am.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC", 
-            "tez.staging-dir": "/tmp/${user.name}/staging", 
-            "tez.am.am-rm.heartbeat.interval-ms.max": "250", 
-            "tez.am.maxtaskfailures.per.node": "10", 
-            "tez.am.container.reuse.non-local-fallback.enabled": "false", 
-            "tez.am.container.reuse.locality.delay-allocation-millis": "250", 
-            "tez.am.container.reuse.rack-fallback.enabled": "true", 
-            "tez.grouping.split-waves": "1.7", 
-            "tez.shuffle-vertex-manager.min-src-fraction": "0.2", 
-            "tez.am.resource.memory.mb": "1364"
-        }, 
-        "hdfs-site": {
-            "dfs.namenode.avoid.write.stale.datanode": "true", 
-            "dfs.permissions.superusergroup": "hdfs", 
-            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
-            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.namenode.checkpoint.txns": "1000000", 
-            "dfs.block.access.token.enable": "true", 
-            "dfs.support.append": "true", 
-            "dfs.datanode.address": "0.0.0.0:1019", 
-            "dfs.cluster.administrators": " hdfs", 
-            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
-            "dfs.namenode.safemode.threshold-pct": "1.0f", 
-            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
-            "dfs.permissions.enabled": "true", 
-            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary", 
-            "dfs.https.port": "50470", 
-            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
-            "dfs.blockreport.initialDelay": "120", 
-            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
-            "dfs.blocksize": "134217728", 
-            "dfs.client.read.shortcircuit": "true", 
-            "dfs.datanode.max.transfer.threads": "4096", 
-            "dfs.heartbeat.interval": "3", 
-            "dfs.replication": "3", 
-            "dfs.namenode.handler.count": "100", 
-            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "fs.permissions.umask-mode": "022", 
-            "dfs.namenode.stale.datanode.interval": "30000", 
-            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
-            "dfs.namenode.name.dir": "/hadoop/hdfs/namenode", 
-            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.data.dir": "/hadoop/hdfs/data", 
-            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
-            "dfs.webhdfs.enabled": "true", 
-            "dfs.datanode.failed.volumes.tolerated": "0", 
-            "dfs.namenode.accesstime.precision": "0", 
-            "dfs.datanode.https.address": "0.0.0.0:50475", 
-            "dfs.namenode.avoid.read.stale.datanode": "true", 
-            "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090", 
-            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.http.address": "0.0.0.0:1022", 
-            "dfs.datanode.du.reserved": "1073741824", 
-            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
-            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "dfs.http.policy": "HTTP_ONLY", 
-            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
-            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", 
-            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
-            "dfs.journalnode.https-address": "0.0.0.0:8481", 
-            "dfs.journalnode.http-address": "0.0.0.0:8480", 
-            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
-            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
-            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
-            "dfs.datanode.data.dir.perm": "750", 
-            "dfs.namenode.name.dir.restore": "true", 
-            "dfs.replication.max": "50", 
-            "dfs.namenode.checkpoint.period": "21600"
-        }, 
-        "tez-env": {
-            "content": "\n# Tez specific configuration\nexport TEZ_CONF_DIR={{config_dir}}\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}", 
-            "tez_user": "tez"
-        }, 
-        "yarn-log4j": {
-            "content": "\n#Relative to Yarn Log Dir Prefix\nyarn.log.dir=.\n#\n# Job Summary Appender\n#\n# Use following logger to send summary to separate file defined by\n# hadoop.mapreduce.jobsummary.log.file rolled daily:\n# hadoop.mapreduce.jobsummary.logger=INFO,JSA\n#\nhadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}\nhadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log\nlog4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender\n# Set the ResourceManager summary log filename\nyarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log\n# Set the ResourceManager summary log level and appender\nyarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}\n#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\n\n# To enable AppSummaryLogging for the RM,\n# set yarn.server.resourcemanager.appsummary.logger to\n# LEVEL,RMSUMMARY in hadoop-env.sh\n\n# Appender for ResourceManager Application Summary Log\n# Requires the 
 following properties to be set\n#    - hadoop.log.dir (Hadoop Log directory)\n#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)\n#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)\nlog4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender\nlog4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}\nlog4j.appender.RMSUMMARY.MaxFileSize=256MB\nlog4j.appender.RMSUMMARY.MaxBackupIndex=20\nlog4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.JSA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\nlog4j.appender.JSA.DatePattern=.yyyy-MM-dd\nlog4j.appender.JSA.layout=org.apache.log4j.PatternLayout\nlog4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$Applic
 ationSummary=${yarn.server.resourcemanager.appsummary.logger}\nlog4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false"
-        }, 
-        "hadoop-policy": {
-            "security.job.client.protocol.acl": "*", 
-            "security.job.task.protocol.acl": "*", 
-            "security.datanode.protocol.acl": "*", 
-            "security.namenode.protocol.acl": "*", 
-            "security.client.datanode.protocol.acl": "*", 
-            "security.inter.tracker.protocol.acl": "*", 
-            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
-            "security.client.protocol.acl": "*", 
-            "security.refresh.policy.protocol.acl": "hadoop", 
-            "security.admin.operations.protocol.acl": "hadoop", 
-            "security.inter.datanode.protocol.acl": "*"
-        }, 
-        "hdfs-log4j": {
-            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.fi
 le}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\n
 log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateChange=WARN"
-        }, 
-        "mapred-env": {
-            "content": "\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n#export HADOOP_JOB_HISTORYSERVER_OPTS=\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.\n#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"", 
-            "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce", 
-            "mapred_user": "mapred", 
-            "jobhistory_heapsize": "900", 
-            "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
-        }, 
-        "krb5-conf": {
-            "conf_dir": "/etc",
-            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\n{# Append additional realm declarations bel
 ow #}\n    ",
-            "domains": "",
-            "manage_krb5_conf": "true"
-        },
-        "pig-properties": {
-            "content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.\n# see bin/pig -help\n\n# brief logging (no timestamps
 )\nbrief=false\n\n# debug level, INFO is default\ndebug=INFO\n\n# verbose print all log messages to screen (default to print only INFO and above to screen)\nverbose=false\n\n# exectype local|mapreduce, mapreduce is default\nexectype=mapreduce\n\n# Enable insertion of information about script into hadoop job conf \npig.script.info.enabled=true\n\n# Do not spill temp files smaller than this size (bytes)\npig.spill.size.threshold=5000000\n\n# EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)\n# This should help reduce the number of files being spilled.\npig.spill.gc.activation.size=40000000\n\n# the following two parameters are to help estimate the reducer number\npig.exec.reducers.bytes.per.reducer=1000000000\npig.exec.reducers.max=999\n\n# Temporary location to store the intermediate data.\npig.temp.dir=/tmp/\n\n# Threshold for merging FRJoin fragment files\npig.files.concatenation.threshold=100\npig.optimistic.files.concatenation=false;\n\npi
 g.disable.counter=false\n\n# Avoid pig failures when multiple jobs write to the same location\npig.location.check.strict=false\n\nhcat.bin=/usr/bin/hcat"
-        }, 
-        "core-site": {
-            "hadoop.http.authentication.signature.secret": "", 
-            "proxyuser_group": "users", 
-            "hadoop.http.authentication.cookie.domain": "", 
-            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
-            "fs.trash.interval": "360", 
-            "hadoop.http.authentication.signer.secret.provider.object": "", 
-            "hadoop.http.authentication.token.validity": "", 
-            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec", 
-            "ipc.client.idlethreshold": "8000", 
-            "hadoop.http.authentication.cookie.path": "", 
-            "hadoop.http.authentication.signer.secret.provider": "", 
-            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
-            "hadoop.rpc.protection": "authentication", 
-            "io.file.buffer.size": "131072", 
-            "hadoop.security.authentication": "kerberos", 
-            "hadoop.http.filter.initializers": "", 
-            "mapreduce.jobtracker.webinterface.trusted": "false", 
-            "hadoop.http.authentication.kerberos.name.rules": "", 
-            "hadoop.proxyuser.HTTP.groups": "users", 
-            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
-            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
-            "hadoop.http.authentication.signature.secret.file": "", 
-            "hadoop.http.authentication.type": "simple", 
-            "hadoop.security.authorization": "true", 
-            "ipc.server.tcpnodelay": "true", 
-            "ipc.client.connect.max.retries": "50", 
-            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](jhs@EXAMPLE.COM)s/.*/mapred/\nRULE:[2:$1@$0](jn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nm@EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rm@EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](yarn@EXAMPLE.COM)s/.*/yarn/\nDEFAULT", 
-            "ipc.client.connection.maxidletime": "30000"
-        }, 
-        "yarn-env": {
-            "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 
-            "apptimelineserver_heapsize": "1024", 
-            "nodemanager_heapsize": "1024", 
-            "content": "\nexport HADOOP_YARN_HOME={{hadoop_yarn_home}}\nexport YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\nexport YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\nexport JAVA_HOME={{java64_home}}\n\n# User for YARN daemons\nexport HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n# resolve links - $0 may be a softlink\nexport YARN_CONF_DIR=\"${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}\"\n\n# some Java parameters\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\nif [ \"$JAVA_HOME\" != \"\" ]; then\n  #echo \"run java in $JAVA_HOME\"\n  JAVA_HOME=$JAVA_HOME\nfi\n\nif [ \"$JAVA_HOME\" = \"\" ]; then\n  echo \"Error: JAVA_HOME is not set.\"\n  exit 1\nfi\n\nJAVA=$JAVA_HOME/bin/java\nJAVA_HEAP_MAX=-Xmx1000m\n\n# For setting YARN specific HEAP sizes please use this\n# Parameter and set appropriately\nYARN_HEAPSIZE={{yarn_heapsize}}\n\n# check envvars which might override default args\nif [ \"$YARN_HEAPSIZE\" != \"\" ]; then\n  JAVA_HEAP_M
 AX=\"-Xmx\"\"$YARN_HEAPSIZE\"\"m\"\nfi\n\n# Resource Manager specific parameters\n\n# Specify the max Heapsize for the ResourceManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_RESOURCEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n# Specify the JVM options to be used when starting the ResourceManager.\n# These options will be appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_RESOURCEMANAGER_OPTS=\n\n# Node Manager specific parameters\n\n# Specify the max Heapsize for the NodeManager using a numerical value\n# in the scale of MB. For example, to 
 specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_NODEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n# Specify the max Heapsize for the HistoryManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1024.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_HISTORYSERVER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n\n# Specify the JVM options to be used when starting the NodeManager.\n# These options will be
  appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_NODEMANAGER_OPTS=\n\n# so that filenames w/ spaces are handled correctly in loops below\nIFS=\n\n\n# default log directory and file\nif [ \"$YARN_LOG_DIR\" = \"\" ]; then\n  YARN_LOG_DIR=\"$HADOOP_YARN_HOME/logs\"\nfi\nif [ \"$YARN_LOGFILE\" = \"\" ]; then\n  YARN_LOGFILE='yarn.log'\nfi\n\n# default policy file for service-level authorization\nif [ \"$YARN_POLICYFILE\" = \"\" ]; then\n  YARN_POLICYFILE=\"hadoop-policy.xml\"\nfi\n\n# restore ordinary behaviour\nunset IFS\n\n\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.root.logger=$
 {YARN_ROOT_LOGGER:-INFO,console}\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\nif [ \"x$JAVA_LIBRARY_PATH\" != \"x\" ]; then\n  YARN_OPTS=\"$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH\"\nfi\nYARN_OPTS=\"$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE\"", 
-            "yarn_heapsize": "1024", 
-            "min_user_id": "500", 
-            "yarn_user": "yarn", 
-            "resourcemanager_heapsize": "1024", 
-            "yarn_log_dir_prefix": "/var/log/hadoop-yarn"
-        }, 
-        "hadoop-env": {
-            "dtnode_heapsize": "1024m", 
-            "namenode_opt_maxnewsize": "256m", 
-            "hdfs_log_dir_prefix": "/var/log/hadoop", 
-            "namenode_heapsize": "1024m", 
-            "proxyuser_group": "users", 
-            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
-            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
-            "hdfs_user": "hdfs", 
-            "namenode_opt_newsize": "256m", 
-            "hadoop_root_logger": "INFO,RFA", 
-            "hadoop_heapsize": "1024", 
-            "namenode_opt_maxpermsize": "256m", 
-            "namenode_opt_permsize": "128m", 
-            "hdfs_principal_name": "hdfs@EXAMPLE.COM"
-        }, 
-        "zookeeper-log4j": {
-            "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
-        }, 
-        "yarn-site": {
-            "yarn.timeline-service.http-authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
-            "yarn.resourcemanager.webapp.address": "c6402.ambari.apache.org:8088", 
-            "yarn.resourcemanager.zk-num-retries": "1000", 
-            "yarn.timeline-service.http-authentication.signature.secret.file": "", 
-            "yarn.timeline-service.bind-host": "0.0.0.0", 
-            "yarn.resourcemanager.ha.enabled": "false", 
-            "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "hadoop-yarn", 
-            "yarn.timeline-service.http-authentication.signature.secret": "", 
-            "yarn.timeline-service.webapp.address": "c6402.ambari.apache.org:8188", 
-            "yarn.resourcemanager.state-store.max-completed-applications": "${yarn.resourcemanager.max-completed-applications}", 
-            "yarn.timeline-service.enabled": "true", 
-            "yarn.nodemanager.recovery.enabled": "true", 
-            "yarn.timeline-service.principal": "yarn/_HOST@EXAMPLE.COM", 
-            "yarn.nodemanager.keytab": "/etc/security/keytabs/nm.service.keytab", 
-            "yarn.timeline-service.address": "c6402.ambari.apache.org:10200", 
-            "yarn.resourcemanager.hostname": "c6402.ambari.apache.org", 
-            "yarn.resourcemanager.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "yarn.resourcemanager.am.max-attempts": "2", 
-            "yarn.nodemanager.log-aggregation.debug-enabled": "false", 
-            "yarn.resourcemanager.system-metrics-publisher.enabled": "true", 
-            "yarn.nodemanager.vmem-pmem-ratio": "2.1", 
-            "yarn.nodemanager.bind-host": "0.0.0.0", 
-            "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude", 
-            "yarn.nodemanager.linux-container-executor.cgroups.mount": "false", 
-            "yarn.timeline-service.http-authentication.cookie.path": "", 
-            "yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size": "10", 
-            "yarn.log.server.url": "http://c6402.ambari.apache.org:19888/jobhistory/logs", 
-            "yarn.nodemanager.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "yarn.timeline-service.keytab": "/etc/security/keytabs/yarn.service.keytab", 
-            "yarn.application.classpath": "$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*", 
-            "yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled": "false", 
-            "yarn.resourcemanager.keytab": "/etc/security/keytabs/rm.service.keytab", 
-            "yarn.resourcemanager.principal": "rm/_HOST@EXAMPLE.COM", 
-            "yarn.nodemanager.local-dirs": "/hadoop/yarn/local", 
-            "yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage": "false", 
-            "yarn.nodemanager.remote-app-log-dir-suffix": "logs", 
-            "yarn.resourcemanager.connect.max-wait.ms": "900000", 
-            "yarn.resourcemanager.address": "c6402.ambari.apache.org:8050", 
-            "yarn.timeline-service.http-authentication.token.validity": "", 
-            "yarn.resourcemanager.proxy-user-privileges.enabled": "true", 
-            "yarn.scheduler.maximum-allocation-mb": "2048", 
-            "yarn.nodemanager.container-monitor.interval-ms": "3000", 
-            "yarn.node-labels.fs-store.retry-policy-spec": "2000, 500", 
-            "yarn.resourcemanager.zk-acl": "world:anyone:rwcda", 
-            "yarn.resourcemanager.webapp.https.address": "c6402.ambari.apache.org:8090", 
-            "yarn.log-aggregation-enable": "true", 
-            "yarn.nodemanager.delete.debug-delay-sec": "0", 
-            "yarn.timeline-service.store-class": "org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore", 
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "", 
-            "yarn.timeline-service.client.retry-interval-ms": "1000", 
-            "hadoop.registry.zk.quorum": "c6402.ambari.apache.org:2181,c6403.ambari.apache.org:2181,c6401.ambari.apache.org:2181", 
-            "yarn.nodemanager.aux-services": "mapreduce_shuffle", 
-            "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler", 
-            "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage": "90", 
-            "yarn.resourcemanager.zk-timeout-ms": "10000", 
-            "yarn.resourcemanager.fs.state-store.uri": " ", 
-            "yarn.nodemanager.linux-container-executor.group": "hadoop", 
-            "yarn.nodemanager.remote-app-log-dir": "/app-logs", 
-            "yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms": "10000", 
-            "yarn.timeline-service.generic-application-history.store-class": "org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore", 
-            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "", 
-            "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", 
-            "yarn.nodemanager.principal": "nm/_HOST@EXAMPLE.COM", 
-            "yarn.resourcemanager.work-preserving-recovery.enabled": "true", 
-            "yarn.resourcemanager.resource-tracker.address": "c6402.ambari.apache.org:8025", 
-            "yarn.nodemanager.health-checker.script.timeout-ms": "60000", 
-            "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler", 
-            "yarn.nodemanager.resource.memory-mb": "2048", 
-            "yarn.timeline-service.http-authentication.kerberos.name.rules": "", 
-            "yarn.nodemanager.resource.cpu-vcores": "1", 
-            "yarn.resourcemanager.proxyusers.*.users": "", 
-            "yarn.timeline-service.ttl-ms": "2678400000", 
-            "yarn.nodemanager.resource.percentage-physical-cpu-limit": "100", 
-            "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000", 
-            "yarn.resourcemanager.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab", 
-            "yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds": "-1", 
-            "yarn.nodemanager.log.retain-seconds": "604800",
-            "yarn.timeline-service.http-authentication.type": "kerberos", 
-            "yarn.nodemanager.log-dirs": "/hadoop/yarn/log", 
-            "yarn.resourcemanager.proxyusers.*.groups": "", 
-            "yarn.timeline-service.client.max-retries": "30", 
-            "yarn.nodemanager.health-checker.interval-ms": "135000", 
-            "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX", 
-            "yarn.nodemanager.vmem-check-enabled": "false", 
-            "yarn.acl.enable": "true", 
-            "yarn.node-labels.manager-class": "org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager", 
-            "yarn.timeline-service.leveldb-timeline-store.read-cache-size": "104857600", 
-            "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler", 
-            "yarn.client.nodemanager-connect.max-wait-ms": "60000", 
-            "yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true", 
-            "yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000", 
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "", 
-            "yarn.timeline-service.http-authentication.signer.secret.provider": "", 
-            "yarn.resourcemanager.bind-host": "0.0.0.0", 
-            "yarn.http.policy": "HTTP_ONLY", 
-            "yarn.resourcemanager.zk-address": "c6402.ambari.apache.org:2181", 
-            "yarn.nodemanager.recovery.dir": "{{yarn_log_dir_prefix}}/nodemanager/recovery-state", 
-            "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor", 
-            "yarn.resourcemanager.store.class": "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore", 
-            "yarn.nodemanager.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab", 
-            "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline", 
-            "yarn.scheduler.minimum-allocation-mb": "682", 
-            "yarn.timeline-service.ttl-enable": "true", 
-            "yarn.resourcemanager.scheduler.address": "c6402.ambari.apache.org:8030", 
-            "yarn.log-aggregation.retain-seconds": "2592000", 
-            "yarn.nodemanager.address": "0.0.0.0:45454", 
-            "hadoop.registry.rm.enabled": "false", 
-            "yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms": "300000", 
-            "yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500", 
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "", 
-            "yarn.nodemanager.log-aggregation.compression-type": "gz", 
-            "yarn.timeline-service.http-authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
-            "yarn.nodemanager.log-aggregation.num-log-files-per-app": "30", 
-            "yarn.resourcemanager.recovery.enabled": "true", 
-            "yarn.timeline-service.http-authentication.cookie.domain": "", 
-            "yarn.resourcemanager.zk-retry-interval-ms": "1000", 
-            "yarn.admin.acl": "",
-            "yarn.node-labels.fs-store.root-dir": "/system/yarn/node-labels", 
-            "yarn.client.nodemanager-connect.retry-interval-ms": "10000", 
-            "yarn.resourcemanager.admin.address": "c6402.ambari.apache.org:8141", 
-            "yarn.timeline-service.webapp.https.address": "c6402.ambari.apache.org:8190", 
-            "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore", 
-            "yarn.resourcemanager.connect.retry-interval.ms": "30000", 
-            "yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000", 
-            "yarn.resourcemanager.proxyusers.*.hosts": ""
-        }, 
-        "capacity-scheduler": {
-            "yarn.scheduler.capacity.default.minimum-user-limit-percent": "100", 
-            "yarn.scheduler.capacity.root.default.maximum-capacity": "100", 
-            "yarn.scheduler.capacity.root.default.user-limit-factor": "1", 
-            "yarn.scheduler.capacity.root.accessible-node-labels": "*", 
-            "yarn.scheduler.capacity.root.default.state": "RUNNING", 
-            "yarn.scheduler.capacity.root.capacity": "100", 
-            "yarn.scheduler.capacity.root.default.capacity": "100", 
-            "yarn.scheduler.capacity.root.queues": "default", 
-            "yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": "-1", 
-            "yarn.scheduler.capacity.root.default-node-label-expression": " ", 
-            "yarn.scheduler.capacity.node-locality-delay": "40", 
-            "yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": "-1", 
-            "yarn.scheduler.capacity.root.default.acl_submit_applications": "*", 
-            "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2", 
-            "yarn.scheduler.capacity.root.acl_administer_queue": "*", 
-            "yarn.scheduler.capacity.maximum-applications": "10000", 
-            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*", 
-            "yarn.scheduler.capacity.resource-calculator": "org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator"
-        }, 
-        "zoo.cfg": {
-            "clientPort": "2181", 
-            "autopurge.purgeInterval": "24", 
-            "syncLimit": "5", 
-            "dataDir": "/hadoop/zookeeper", 
-            "initLimit": "10", 
-            "tickTime": "2000", 
-            "autopurge.snapRetainCount": "30"
-        }, 
-        "zookeeper-env": {
-            "zk_user": "zookeeper", 
-            "zookeeper_keytab_path": "/etc/security/keytabs/zk.service.keytab", 
-            "zk_log_dir": "/var/log/zookeeper", 
-            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
-            "zk_pid_dir": "/var/run/zookeeper", 
-            "zookeeper_principal_name": "zookeeper/_HOST@EXAMPLE.COM"
-        }, 
-        "pig-log4j": {
-            "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n# ***** Set root logger level to DEBUG and its only appender to A.\nlog4j.logger.org.apache.pig=info, A\n\n# ***** A is set to be a ConsoleAppender.\
 nlog4j.appender.A=org.apache.log4j.ConsoleAppender\n# ***** A uses PatternLayout.\nlog4j.appender.A.layout=org.apache.log4j.PatternLayout\nlog4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n"
-        }, 
-        "cluster-env": {
-            "managed_hdfs_resource_property_names": "",
-            "security_enabled": "true",
-            "ignore_groupsusers_create": "false",
-            "kerberos_domain": "EXAMPLE.COM",
-            "user_group": "hadoop",
-            "smokeuser_principal_name": "ambari-qa@EXAMPLE.COM",
-            "smokeuser": "ambari-qa",
-            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab"
-        }
-    }, 
-    "configurationTags": {
-        "mapred-site": {
-            "tag": "version1425150589654"
-        }, 
-        "pig-env": {
-            "tag": "version1425325831978"
-        }, 
-        "ranger-hdfs-plugin-properties": {
-            "tag": "version1"
-        }, 
-        "kerberos-env": {
-            "tag": "version1425149782373"
-        }, 
-        "tez-site": {
-            "tag": "version1"
-        }, 
-        "hdfs-site": {
-            "tag": "version1425150589741"
-        }, 
-        "tez-env": {
-            "tag": "version1"
-        }, 
-        "yarn-log4j": {
-            "tag": "version1"
-        }, 
-        "hadoop-policy": {
-            "tag": "version1"
-        }, 
-        "hdfs-log4j": {
-            "tag": "version1"
-        }, 
-        "mapred-env": {
-            "tag": "version1"
-        }, 
-        "krb5-conf": {
-            "tag": "version1425149782373"
-        }, 
-        "pig-properties": {
-            "tag": "version1425325831978"
-        }, 
-        "core-site": {
-            "tag": "version1425150589818"
-        }, 
-        "yarn-env": {
-            "tag": "version1"
-        }, 
-        "hadoop-env": {
-            "tag": "version1425150589788"
-        }, 
-        "zookeeper-log4j": {
-            "tag": "version1"
-        }, 
-        "yarn-site": {
-            "tag": "version1425150589763"
-        }, 
-        "capacity-scheduler": {
-            "tag": "version1"
-        }, 
-        "zoo.cfg": {
-            "tag": "version1"
-        }, 
-        "zookeeper-env": {
-            "tag": "version1425150589681"
-        }, 
-        "pig-log4j": {
-            "tag": "version1425325831978"
-        }, 
-        "cluster-env": {
-            "tag": "version1425150589709"
-        }
-    }, 
-    "commandId": "15-0", 
-    "clusterHostInfo": {
-        "snamenode_host": [
-            "c6402.ambari.apache.org"
-        ], 
-        "nm_hosts": [
-            "c6403.ambari.apache.org", 
-            "c6401.ambari.apache.org", 
-            "c6402.ambari.apache.org"
-        ], 
-        "app_timeline_server_hosts": [
-            "c6402.ambari.apache.org"
-        ], 
-        "all_ping_ports": [
-            "8670", 
-            "8670", 
-            "8670"
-        ], 
-        "rm_host": [
-            "c6402.ambari.apache.org"
-        ], 
-        "all_hosts": [
-            "c6403.ambari.apache.org", 
-            "c6401.ambari.apache.org", 
-            "c6402.ambari.apache.org"
-        ], 
-        "slave_hosts": [
-            "c6403.ambari.apache.org", 
-            "c6401.ambari.apache.org", 
-            "c6402.ambari.apache.org"
-        ], 
-        "namenode_host": [
-            "c6401.ambari.apache.org"
-        ], 
-        "ambari_server_host": [
-            "c6401.ambari.apache.org"
-        ], 
-        "zookeeper_hosts": [
-            "c6403.ambari.apache.org", 
-            "c6401.ambari.apache.org", 
-            "c6402.ambari.apache.org"
-        ], 
-        "hs_host": [
-            "c6402.ambari.apache.org"
-        ]
-    }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json b/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json
index 3d0dc28..bcc5359 100644
--- a/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json
+++ b/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json
@@ -796,7 +796,7 @@
     }, {
       "yarn-site": {
         "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
-        "yarn.resourcemanager.proxyuser.*.users": "",
+        "yarn.resourcemanager.proxyusers.*.users": "",
         "yarn.timeline-service.http-authentication.token.validity": "",
         "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
         "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
@@ -805,14 +805,14 @@
         "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
         "yarn.acl.enable": "true",
         "yarn.timeline-service.http-authentication.signer.secret.provider": "",
-        "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
-        "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+        "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+        "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
         "yarn.timeline-service.http-authentication.signature.secret": "",
         "yarn.timeline-service.http-authentication.signature.secret.file": "",
-        "yarn.resourcemanager.proxyuser.*.hosts": "",
-        "yarn.resourcemanager.proxyuser.*.groups": "",
+        "yarn.resourcemanager.proxyusers.*.hosts": "",
+        "yarn.resourcemanager.proxyusers.*.groups": "",
         "yarn.timeline-service.enabled": "true",
-        "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+        "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
         "yarn.timeline-service.http-authentication.cookie.domain": ""
       }
     }, {


[19/50] [abbrv] ambari git commit: AMBARI-19579. Hive View 2.0: Show precision and scale of a column in table manager view. (dipayanb)

Posted by nc...@apache.org.
AMBARI-19579. Hive View 2.0: Show precision and scale of a column in table manager view. (dipayanb)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/fecf197c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/fecf197c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/fecf197c

Branch: refs/heads/branch-dev-patch-upgrade
Commit: fecf197c710dcd99cc2c8d1189e393fc227e5fb3
Parents: e83837e
Author: Dipayan Bhowmick <di...@gmail.com>
Authored: Wed Jan 18 00:19:39 2017 +0530
Committer: Dipayan Bhowmick <di...@gmail.com>
Committed: Wed Jan 18 00:20:12 2017 +0530

----------------------------------------------------------------------
 .../app/components/table-advanced-settings.js   |  1 -
 .../ui/app/helpers/format-column-size.js        | 39 ++++++++++++++++++++
 .../databases/database/tables/table/columns.js  | 10 +++++
 .../ui/app/templates/components/column-item.hbs |  2 +-
 .../databases/database/tables/table/columns.hbs | 14 +++++--
 .../database/tables/table/partitions.hbs        |  2 +-
 6 files changed, 61 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/fecf197c/contrib/views/hive20/src/main/resources/ui/app/components/table-advanced-settings.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/components/table-advanced-settings.js b/contrib/views/hive20/src/main/resources/ui/app/components/table-advanced-settings.js
index 181816a..5e50a5c 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/components/table-advanced-settings.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/components/table-advanced-settings.js
@@ -58,7 +58,6 @@ export default Ember.Component.extend({
     } else {
       let defaultFileFormat = this.get('fileFormats').findBy('default', true);
       this.set('settings.fileFormat', {});
-      debugger;
       this.set('settings.fileFormat.type', defaultFileFormat.name);
     }
     if (!Ember.isEmpty(this.get('settings.rowFormat'))) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/fecf197c/contrib/views/hive20/src/main/resources/ui/app/helpers/format-column-size.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/helpers/format-column-size.js b/contrib/views/hive20/src/main/resources/ui/app/helpers/format-column-size.js
new file mode 100644
index 0000000..a24f797
--- /dev/null
+++ b/contrib/views/hive20/src/main/resources/ui/app/helpers/format-column-size.js
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export function formatColumnSize(params/*, hash*/) {
+  const precision = params[0];
+  const scale = params[1];
+  if (Ember.isEmpty(precision) && Ember.isEmpty(scale)) {
+    return '';
+  }
+  let sizeString = '( ';
+  if (precision) {
+    sizeString = `${sizeString}${precision}`
+  }
+  if (scale) {
+    sizeString = `${sizeString}, ${scale}`;
+  }
+  sizeString = `${sizeString} )`;
+
+  return sizeString;
+}
+
+export default Ember.Helper.helper(formatColumnSize);
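
A few illustrative calls against the helper above (not part of the patch; they assume the helper is imported from its module path and invoked with a [precision, scale] params array, which is how Ember delivers positional params to a helper function):

  import { formatColumnSize } from 'ui/app/helpers/format-column-size'; // import path is illustrative

  formatColumnSize([10, 2]);      // => "( 10, 2 )"  e.g. a decimal(10,2) column
  formatColumnSize([25, null]);   // => "( 25 )"     e.g. a varchar(25) column
  formatColumnSize([null, null]); // => ""           non-sized types add nothing

In the templates changed further down, it is invoked as {{format-column-size column.precision column.scale}} next to {{column.type}}, so a decimal(10,2) column renders as "decimal ( 10, 2 )" in the COLUMN TYPE cell.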

http://git-wip-us.apache.org/repos/asf/ambari/blob/fecf197c/contrib/views/hive20/src/main/resources/ui/app/routes/databases/database/tables/table/columns.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/routes/databases/database/tables/table/columns.js b/contrib/views/hive20/src/main/resources/ui/app/routes/databases/database/tables/table/columns.js
index a11a4de..694cf21 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/routes/databases/database/tables/table/columns.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/routes/databases/database/tables/table/columns.js
@@ -20,4 +20,14 @@ import TableMetaRouter from './table-meta-router';
 
 export default TableMetaRouter.extend({
 
+  setupController: function (controller, model) {
+    this._super(controller, model);
+    let table = controller.get('table');
+    let clusteredColumns = table.get('storageInfo.bucketCols');
+    let columns = table.get('columns');
+    columns.forEach((column) => {
+      column.isClustered = !!clusteredColumns.contains(column.name);
+    });
+  },
+
 });
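
The setupController addition above marks a column as clustered when its name appears in the table's bucket columns (storageInfo.bucketCols, i.e. the CLUSTERED BY definition). A minimal standalone sketch of that flag computation, using plain arrays and hypothetical column names in place of the Ember objects (Ember's contains() is just a membership test, analogous to Array.prototype.includes):

  const clusteredColumns = ['user_id'];                  // would come from storageInfo.bucketCols
  const columns = [{ name: 'user_id' }, { name: 'ts' }];
  columns.forEach((column) => {
    column.isClustered = clusteredColumns.indexOf(column.name) !== -1;
  });
  // columns[0].isClustered === true, columns[1].isClustered === false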

http://git-wip-us.apache.org/repos/asf/ambari/blob/fecf197c/contrib/views/hive20/src/main/resources/ui/app/templates/components/column-item.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/components/column-item.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/components/column-item.hbs
index 96cf5ab..73fac89 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/components/column-item.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/components/column-item.hbs
@@ -98,7 +98,7 @@
               <div class="col-sm-offset-2 col-sm-10">
                 <div class="checkbox">
                   <label>
-                    {{input type="checkbox" checked=column.isClustered disabled=(not column.editing)}} Clustering
+                    {{input type="checkbox" checked=column.isClustered disabled=(not column.editing)}} Clustered
                   </label>
                 </div>
               </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fecf197c/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/columns.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/columns.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/columns.hbs
index f7f01f5..ef2ea21 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/columns.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/columns.hbs
@@ -20,17 +20,23 @@
   <table class="table table-bordered table-hover">
     <thead>
     <tr>
-      <th>COLUMN NAME</th>
-      <th>COLUMN TYPE</th>
-      <th>COMMENT</th>
+      <th width="30%">COLUMN NAME</th>
+      <th width="20%">COLUMN TYPE</th>
+      <th width="40%">COMMENT</th>
+      <th width="10%">CLUSTERED</th>
     </tr>
     </thead>
     <tbody>
     {{#each table.columns as |column|}}
       <tr>
         <th>{{column.name}}</th>
-        <td>{{column.type}}</td>
+        <td>{{column.type}} {{format-column-size column.precision column.scale}}</td>
         <td>{{column.comment}}</td>
+        <td class="text-center">
+          {{#if column.isClustered}}
+            <span class="text-primary">{{fa-icon "check"}}</span>
+          {{/if}}
+        </td>
       </tr>
     {{/each}}
     </tbody>

http://git-wip-us.apache.org/repos/asf/ambari/blob/fecf197c/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/partitions.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/partitions.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/partitions.hbs
index f66a0bc..2c4beb9 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/partitions.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/databases/database/tables/table/partitions.hbs
@@ -29,7 +29,7 @@
     {{#each model.partitionInfo.columns as |column|}}
       <tr>
         <th>{{column.name}}</th>
-        <td>{{column.type}}</td>
+        <td>{{column.type}} {{format-column-size column.precision column.scale}}</td>
         <td>{{column.comment}}</td>
       </tr>
     {{/each}}


[31/50] [abbrv] ambari git commit: Revert "AMBARI-19337. Ambari has some spelling mistakes in YARN proxyuser properties in many places (Jay SenSharma via smohanty)"

Posted by nc...@apache.org.
Revert "AMBARI-19337. Ambari has some spelling mistakes in YARN proxyuser properties in many places (Jay SenSharma via smohanty)"

This reverts commit c689096d13c62beafe1eb52a8aa9a4a8c4d9cd63.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c916dda5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c916dda5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c916dda5

Branch: refs/heads/branch-dev-patch-upgrade
Commit: c916dda5d78ddb2f1e68ba7bfd359be5af7dcbc2
Parents: c689096
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Tue Jan 17 13:28:43 2017 -0800
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Tue Jan 17 13:28:43 2017 -0800

----------------------------------------------------------------------
 .../YARN/2.1.0.2.0/kerberos.json                |   12 +-
 .../YARN/2.1.0.2.0/kerberos.json.orig           |  216 --
 .../YARN/3.0.0.3.0/kerberos.json                |   12 +-
 .../YARN/3.0.0.3.0/kerberos.json.orig           |  280 --
 .../stacks/HDP/2.2/services/YARN/kerberos.json  |   12 +-
 .../HDP/2.2/services/YARN/kerberos.json.orig    |  217 --
 .../HDP/2.3.ECS/services/YARN/kerberos.json     |   12 +-
 .../2.3.ECS/services/YARN/kerberos.json.orig    |  220 --
 .../stacks/HDP/2.3/services/YARN/kerberos.json  |   12 +-
 .../HDP/2.3/services/YARN/kerberos.json.orig    |  226 --
 .../stacks/HDP/2.5/services/YARN/kerberos.json  |   12 +-
 .../HDP/2.5/services/YARN/kerberos.json.orig    |  280 --
 .../stacks/PERF/1.0/services/YARN/kerberos.json |   12 +-
 .../PERF/1.0/services/YARN/kerberos.json.orig   |  278 --
 .../2.2/configs/pig-service-check-secure.json   |   12 +-
 .../configs/pig-service-check-secure.json.orig  |  651 ----
 .../test_kerberos_descriptor_2_1_3.json         |   12 +-
 .../test_kerberos_descriptor_2_1_3.json.orig    | 1320 --------
 .../data/stacks/HDP-2.1/service_components.json |   12 +-
 .../stacks/HDP-2.1/service_components.json.orig | 3170 ------------------
 .../app/data/configs/wizards/secure_mapping.js  |   12 +-
 21 files changed, 66 insertions(+), 6924 deletions(-)
----------------------------------------------------------------------
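
In short, this revert swaps the singular proxyuser spelling (introduced by AMBARI-19337) back to the plural proxyusers spelling across the kerberos descriptors, and deletes the *.orig backup files. An illustrative before/after for one affected key (string values only, not an endorsement of either spelling):

  // spelling from AMBARI-19337, removed by this revert:
  const fixed = 'yarn.resourcemanager.proxyuser.*.users';
  // spelling restored by this revert:
  const restored = 'yarn.resourcemanager.proxyusers.*.users';
  console.log(restored.replace('proxyusers', 'proxyuser') === fixed); // true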


http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
index c8b5989..6b61c13 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
@@ -23,13 +23,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyuser.*.groups": "",
-            "yarn.resourcemanager.proxyuser.*.hosts": "",
-            "yarn.resourcemanager.proxyuser.*.users": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",

http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json.orig
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json.orig b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json.orig
deleted file mode 100644
index 6b61c13..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json.orig
+++ /dev/null
@@ -1,216 +0,0 @@
-{
-  "services": [
-    {
-      "name": "YARN",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "configurations": [
-        {
-          "yarn-site": {
-            "yarn.timeline-service.enabled": "false",
-            "yarn.timeline-service.http-authentication.type": "kerberos",
-            "yarn.acl.enable": "true",
-            "yarn.timeline-service.http-authentication.signature.secret": "",
-            "yarn.timeline-service.http-authentication.signature.secret.file": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
-            "yarn.timeline-service.http-authentication.token.validity": "",
-            "yarn.timeline-service.http-authentication.cookie.domain": "",
-            "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
-            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
-            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
-            "hadoop.registry.secure" : "true",
-            "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"
-          }
-        },
-        {
-          "core-site": {
-            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
-            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name": "NODEMANAGER",
-          "identities": [
-            {
-              "name": "nodemanager_nm",
-              "principal": {
-                "value": "nm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.nodemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/nm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.nodemanager.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
-              }
-            }
-          ],
-          "configurations": [
-            {
-              "yarn-site": {
-                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
-              }
-            }
-          ]
-        },
-        {
-          "name": "RESOURCEMANAGER",
-          "identities": [
-            {
-              "name": "resource_manager_rm",
-              "principal": {
-                "value": "rm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.resourcemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/rm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.resourcemanager.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
-              }
-            }
-          ]
-        },
-        {
-          "name": "APP_TIMELINE_SERVER",
-          "identities": [
-            {
-              "name": "app_timeline_server_yarn",
-              "principal": {
-                "value": "yarn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.timeline-service.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/yarn.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.timeline-service.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
-              }
-            },
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "name": "MAPREDUCE2",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "components": [
-        {
-          "name": "HISTORYSERVER",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            },
-            {
-              "name": "history_server_jhs",
-              "principal": {
-                "value": "jhs/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "mapred-site/mapreduce.jobhistory.principal",
-                "local_username": "${mapred-env/mapred_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/jhs.service.keytab",
-                "owner": {
-                  "name": "${mapred-env/mapred_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
index fb85e7a..d334887 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
@@ -24,13 +24,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyuser.*.groups": "",
-            "yarn.resourcemanager.proxyuser.*.hosts": "",
-            "yarn.resourcemanager.proxyuser.*.users": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",

http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json.orig
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json.orig b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json.orig
deleted file mode 100644
index d334887..0000000
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json.orig
+++ /dev/null
@@ -1,280 +0,0 @@
-{
-  "services": [
-    {
-      "name": "YARN",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "configurations": [
-        {
-          "yarn-site": {
-            "yarn.timeline-service.enabled": "true",
-            "yarn.timeline-service.http-authentication.type": "kerberos",
-            "yarn.acl.enable": "true",
-            "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
-            "yarn.timeline-service.http-authentication.signature.secret": "",
-            "yarn.timeline-service.http-authentication.signature.secret.file": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
-            "yarn.timeline-service.http-authentication.token.validity": "",
-            "yarn.timeline-service.http-authentication.cookie.domain": "",
-            "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
-            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
-            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
-            "hadoop.registry.secure" : "true",
-            "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"
-          }
-        },
-        {
-          "core-site": {
-            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
-            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
-          }
-        },
-        {
-          "capacity-scheduler": {
-            "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
-          }
-        },
-        {
-          "ranger-yarn-audit": {
-            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
-            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
-            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
-            "xasecure.audit.jaas.Client.option.storeKey": "false",
-            "xasecure.audit.jaas.Client.option.serviceName": "solr",
-            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name": "NODEMANAGER",
-          "identities": [
-            {
-              "name": "nodemanager_nm",
-              "principal": {
-                "value": "nm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.nodemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/nm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.nodemanager.keytab"
-              }
-            },
-            {
-              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
-              "principal": {
-                "configuration": "hive-interactive-site/hive.llap.daemon.service.principal"
-              },
-              "keytab": {
-                "configuration": "hive-interactive-site/hive.llap.daemon.keytab.file"
-              },
-              "when" : {
-                "contains" : ["services", "HIVE"]
-              }
-            },
-            {
-              "name": "llap_zk_hive",
-              "principal": {
-                "value": "hive/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hive-interactive-site/hive.llap.zk.sm.principal"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/hive.llap.zk.sm.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": "r"
-                },
-                "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
-              },
-              "when" : {
-                "contains" : ["services", "HIVE"]
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
-              }
-            }
-          ],
-          "configurations": [
-            {
-              "yarn-site": {
-                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
-              }
-            }
-          ]
-        },
-        {
-          "name": "RESOURCEMANAGER",
-          "identities": [
-            {
-              "name": "resource_manager_rm",
-              "principal": {
-                "value": "rm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.resourcemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/rm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.resourcemanager.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
-              }
-            },
-            {
-              "name": "/YARN/RESOURCEMANAGER/resource_manager_rm",
-              "principal": {
-                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.principal"
-              },
-              "keytab": {
-                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.keyTab"
-              }
-            }
-          ]
-        },
-        {
-          "name": "APP_TIMELINE_SERVER",
-          "identities": [
-            {
-              "name": "app_timeline_server_yarn",
-              "principal": {
-                "value": "yarn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.timeline-service.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/yarn.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.timeline-service.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
-              }
-            },
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "name": "MAPREDUCE2",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "components": [
-        {
-          "name": "HISTORYSERVER",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            },
-            {
-              "name": "history_server_jhs",
-              "principal": {
-                "value": "jhs/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "mapred-site/mapreduce.jobhistory.principal",
-                "local_username": "${mapred-env/mapred_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/jhs.service.keytab",
-                "owner": {
-                  "name": "${mapred-env/mapred_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
index 85a3221..ad30b76 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
@@ -23,13 +23,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyuser.*.groups": "",
-            "yarn.resourcemanager.proxyuser.*.hosts": "",
-            "yarn.resourcemanager.proxyuser.*.users": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore-secure",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",

http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json.orig
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json.orig b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json.orig
deleted file mode 100644
index ad30b76..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json.orig
+++ /dev/null
@@ -1,217 +0,0 @@
-{
-  "services": [
-    {
-      "name": "YARN",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "configurations": [
-        {
-          "yarn-site": {
-            "yarn.timeline-service.enabled": "true",
-            "yarn.timeline-service.http-authentication.type": "kerberos",
-            "yarn.acl.enable": "true",
-            "yarn.timeline-service.http-authentication.signature.secret": "",
-            "yarn.timeline-service.http-authentication.signature.secret.file": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
-            "yarn.timeline-service.http-authentication.token.validity": "",
-            "yarn.timeline-service.http-authentication.cookie.domain": "",
-            "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
-            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
-            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore-secure",
-            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
-            "hadoop.registry.secure" : "true",
-            "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"
-          }
-        },
-        {
-          "core-site": {
-            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
-            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name": "NODEMANAGER",
-          "identities": [
-            {
-              "name": "nodemanager_nm",
-              "principal": {
-                "value": "nm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.nodemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/nm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.nodemanager.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
-              }
-            }
-          ],
-          "configurations": [
-            {
-              "yarn-site": {
-                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
-              }
-            }
-          ]
-        },
-        {
-          "name": "RESOURCEMANAGER",
-          "identities": [
-            {
-              "name": "resource_manager_rm",
-              "principal": {
-                "value": "rm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.resourcemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/rm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.resourcemanager.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
-              }
-            }
-          ]
-        },
-        {
-          "name": "APP_TIMELINE_SERVER",
-          "identities": [
-            {
-              "name": "app_timeline_server_yarn",
-              "principal": {
-                "value": "yarn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.timeline-service.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/yarn.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.timeline-service.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
-              }
-            },
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "name": "MAPREDUCE2",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "components": [
-        {
-          "name": "HISTORYSERVER",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            },
-            {
-              "name": "history_server_jhs",
-              "principal": {
-                "value": "jhs/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "mapred-site/mapreduce.jobhistory.principal",
-                "local_username": "${mapred-env/mapred_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/jhs.service.keytab",
-                "owner": {
-                  "name": "${mapred-env/mapred_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
index e27513a..7977941 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
@@ -26,13 +26,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyuser.*.groups": "",
-            "yarn.resourcemanager.proxyuser.*.hosts": "",
-            "yarn.resourcemanager.proxyuser.*.users": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",

http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json.orig
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json.orig b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json.orig
deleted file mode 100644
index 7977941..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json.orig
+++ /dev/null
@@ -1,220 +0,0 @@
-{
-  "services": [
-    {
-      "name": "YARN",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/ECS/hdfs"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "configurations": [
-        {
-          "yarn-site": {
-            "yarn.timeline-service.enabled": "false",
-            "yarn.timeline-service.http-authentication.type": "kerberos",
-            "yarn.acl.enable": "true",
-            "yarn.timeline-service.http-authentication.signature.secret": "",
-            "yarn.timeline-service.http-authentication.signature.secret.file": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
-            "yarn.timeline-service.http-authentication.token.validity": "",
-            "yarn.timeline-service.http-authentication.cookie.domain": "",
-            "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
-            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
-            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
-            "hadoop.registry.secure" : "true",
-            "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"
-          }
-        },
-        {
-          "core-site": {
-            "hadoop.proxyuser.yarn.groups": "*",
-            "hadoop.proxyuser.yarn.hosts": "${yarn-site/yarn.resourcemanager.hostname}"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name": "NODEMANAGER",
-          "identities": [
-            {
-              "name": "nodemanager_nm",
-              "principal": {
-                "value": "nm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.nodemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/nm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.nodemanager.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
-              }
-            }
-          ],
-          "configurations": [
-            {
-              "yarn-site": {
-                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
-              }
-            }
-          ]
-        },
-        {
-          "name": "RESOURCEMANAGER",
-          "identities": [
-            {
-              "name": "resource_manager_rm",
-              "principal": {
-                "value": "rm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.resourcemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/rm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.resourcemanager.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
-              }
-            }
-          ]
-        },
-        {
-          "name": "APP_TIMELINE_SERVER",
-          "identities": [
-            {
-              "name": "app_timeline_server_yarn",
-              "principal": {
-                "value": "yarn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.timeline-service.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/yarn.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.timeline-service.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
-              }
-            },
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "name": "MAPREDUCE2",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/ECS/hdfs"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "components": [
-        {
-          "name": "HISTORYSERVER",
-          "identities": [
-            {
-              "name": "history_server_jhs",
-              "principal": {
-                "value": "jhs/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "mapred-site/mapreduce.jobhistory.principal",
-                "local_username": "${mapred-env/mapred_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/jhs.service.keytab",
-                "owner": {
-                  "name": "${mapred-env/mapred_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
index bf0280b..73addb1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
@@ -24,13 +24,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyuser.*.groups": "",
-            "yarn.resourcemanager.proxyuser.*.hosts": "",
-            "yarn.resourcemanager.proxyuser.*.users": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",

http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json.orig
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json.orig b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json.orig
deleted file mode 100644
index 73addb1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json.orig
+++ /dev/null
@@ -1,226 +0,0 @@
-{
-  "services": [
-    {
-      "name": "YARN",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "configurations": [
-        {
-          "yarn-site": {
-            "yarn.timeline-service.enabled": "true",
-            "yarn.timeline-service.http-authentication.type": "kerberos",
-            "yarn.acl.enable": "true",
-            "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
-            "yarn.timeline-service.http-authentication.signature.secret": "",
-            "yarn.timeline-service.http-authentication.signature.secret.file": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
-            "yarn.timeline-service.http-authentication.token.validity": "",
-            "yarn.timeline-service.http-authentication.cookie.domain": "",
-            "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
-            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
-            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
-            "hadoop.registry.secure" : "true",
-            "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"
-          }
-        },
-        {
-          "core-site": {
-            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
-            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
-          }
-        },
-        {
-          "capacity-scheduler": {
-            "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name": "NODEMANAGER",
-          "identities": [
-            {
-              "name": "nodemanager_nm",
-              "principal": {
-                "value": "nm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.nodemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/nm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.nodemanager.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
-              }
-            }
-          ],
-          "configurations": [
-            {
-              "yarn-site": {
-                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
-              }
-            }
-          ]
-        },
-        {
-          "name": "RESOURCEMANAGER",
-          "identities": [
-            {
-              "name": "resource_manager_rm",
-              "principal": {
-                "value": "rm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.resourcemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/rm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.resourcemanager.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
-              }
-            }
-          ]
-        },
-        {
-          "name": "APP_TIMELINE_SERVER",
-          "identities": [
-            {
-              "name": "app_timeline_server_yarn",
-              "principal": {
-                "value": "yarn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.timeline-service.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/yarn.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.timeline-service.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
-              }
-            },
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "name": "MAPREDUCE2",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "components": [
-        {
-          "name": "HISTORYSERVER",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            },
-            {
-              "name": "history_server_jhs",
-              "principal": {
-                "value": "jhs/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "mapred-site/mapreduce.jobhistory.principal",
-                "local_username": "${mapred-env/mapred_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/jhs.service.keytab",
-                "owner": {
-                  "name": "${mapred-env/mapred_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
index fb85e7a..d334887 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
@@ -24,13 +24,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyuser.*.groups": "",
-            "yarn.resourcemanager.proxyuser.*.hosts": "",
-            "yarn.resourcemanager.proxyuser.*.users": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",

http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json.orig
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json.orig b/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json.orig
deleted file mode 100644
index d334887..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json.orig
+++ /dev/null
@@ -1,280 +0,0 @@
-{
-  "services": [
-    {
-      "name": "YARN",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "configurations": [
-        {
-          "yarn-site": {
-            "yarn.timeline-service.enabled": "true",
-            "yarn.timeline-service.http-authentication.type": "kerberos",
-            "yarn.acl.enable": "true",
-            "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
-            "yarn.timeline-service.http-authentication.signature.secret": "",
-            "yarn.timeline-service.http-authentication.signature.secret.file": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
-            "yarn.timeline-service.http-authentication.token.validity": "",
-            "yarn.timeline-service.http-authentication.cookie.domain": "",
-            "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
-            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
-            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
-            "hadoop.registry.secure" : "true",
-            "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"
-          }
-        },
-        {
-          "core-site": {
-            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
-            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
-          }
-        },
-        {
-          "capacity-scheduler": {
-            "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
-          }
-        },
-        {
-          "ranger-yarn-audit": {
-            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
-            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
-            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
-            "xasecure.audit.jaas.Client.option.storeKey": "false",
-            "xasecure.audit.jaas.Client.option.serviceName": "solr",
-            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name": "NODEMANAGER",
-          "identities": [
-            {
-              "name": "nodemanager_nm",
-              "principal": {
-                "value": "nm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.nodemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/nm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.nodemanager.keytab"
-              }
-            },
-            {
-              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
-              "principal": {
-                "configuration": "hive-interactive-site/hive.llap.daemon.service.principal"
-              },
-              "keytab": {
-                "configuration": "hive-interactive-site/hive.llap.daemon.keytab.file"
-              },
-              "when" : {
-                "contains" : ["services", "HIVE"]
-              }
-            },
-            {
-              "name": "llap_zk_hive",
-              "principal": {
-                "value": "hive/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hive-interactive-site/hive.llap.zk.sm.principal"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/hive.llap.zk.sm.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": "r"
-                },
-                "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
-              },
-              "when" : {
-                "contains" : ["services", "HIVE"]
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
-              }
-            }
-          ],
-          "configurations": [
-            {
-              "yarn-site": {
-                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
-              }
-            }
-          ]
-        },
-        {
-          "name": "RESOURCEMANAGER",
-          "identities": [
-            {
-              "name": "resource_manager_rm",
-              "principal": {
-                "value": "rm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.resourcemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/rm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.resourcemanager.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
-              }
-            },
-            {
-              "name": "/YARN/RESOURCEMANAGER/resource_manager_rm",
-              "principal": {
-                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.principal"
-              },
-              "keytab": {
-                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.keyTab"
-              }
-            }
-          ]
-        },
-        {
-          "name": "APP_TIMELINE_SERVER",
-          "identities": [
-            {
-              "name": "app_timeline_server_yarn",
-              "principal": {
-                "value": "yarn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.timeline-service.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/yarn.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.timeline-service.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
-              }
-            },
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "name": "MAPREDUCE2",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "components": [
-        {
-          "name": "HISTORYSERVER",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            },
-            {
-              "name": "history_server_jhs",
-              "principal": {
-                "value": "jhs/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "mapred-site/mapreduce.jobhistory.principal",
-                "local_username": "${mapred-env/mapred_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/jhs.service.keytab",
-                "owner": {
-                  "name": "${mapred-env/mapred_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json
index 2735323..7e74237 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json
@@ -24,13 +24,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyuser.*.groups": "",
-            "yarn.resourcemanager.proxyuser.*.hosts": "",
-            "yarn.resourcemanager.proxyuser.*.users": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore-secure"
           }

http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json.orig
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json.orig b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json.orig
deleted file mode 100644
index 7e74237..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json.orig
+++ /dev/null
@@ -1,278 +0,0 @@
-{
-  "services": [
-    {
-      "name": "YARN",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "configurations": [
-        {
-          "yarn-site": {
-            "yarn.timeline-service.enabled": "true",
-            "yarn.timeline-service.http-authentication.type": "kerberos",
-            "yarn.acl.enable": "true",
-            "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
-            "yarn.timeline-service.http-authentication.signature.secret": "",
-            "yarn.timeline-service.http-authentication.signature.secret.file": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
-            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
-            "yarn.timeline-service.http-authentication.token.validity": "",
-            "yarn.timeline-service.http-authentication.cookie.domain": "",
-            "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
-            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
-            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore-secure"
-          }
-        },
-        {
-          "core-site": {
-            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
-            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
-          }
-        },
-        {
-          "capacity-scheduler": {
-            "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
-            "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
-          }
-        },
-        {
-          "ranger-yarn-audit": {
-            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
-            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
-            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
-            "xasecure.audit.jaas.Client.option.storeKey": "false",
-            "xasecure.audit.jaas.Client.option.serviceName": "solr",
-            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name": "NODEMANAGER",
-          "identities": [
-            {
-              "name": "nodemanager_nm",
-              "principal": {
-                "value": "nm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.nodemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/nm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.nodemanager.keytab"
-              }
-            },
-            {
-              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
-              "principal": {
-                "configuration": "hive-interactive-site/hive.llap.daemon.service.principal"
-              },
-              "keytab": {
-                "configuration": "hive-interactive-site/hive.llap.daemon.keytab.file"
-              },
-              "when" : {
-                "contains" : ["services", "HIVE"]
-              }
-            },
-            {
-              "name": "llap_zk_hive",
-              "principal": {
-                "value": "hive/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "hive-interactive-site/hive.llap.zk.sm.principal"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/hive.llap.zk.sm.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": "r"
-                },
-                "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
-              },
-              "when" : {
-                "contains" : ["services", "HIVE"]
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
-              }
-            }
-          ],
-          "configurations": [
-            {
-              "yarn-site": {
-                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
-              }
-            }
-          ]
-        },
-        {
-          "name": "RESOURCEMANAGER",
-          "identities": [
-            {
-              "name": "resource_manager_rm",
-              "principal": {
-                "value": "rm/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.resourcemanager.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/rm.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.resourcemanager.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
-              }
-            },
-            {
-              "name": "/YARN/RESOURCEMANAGER/resource_manager_rm",
-              "principal": {
-                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.principal"
-              },
-              "keytab": {
-                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.keyTab"
-              }
-            }
-          ]
-        },
-        {
-          "name": "APP_TIMELINE_SERVER",
-          "identities": [
-            {
-              "name": "app_timeline_server_yarn",
-              "principal": {
-                "value": "yarn/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "yarn-site/yarn.timeline-service.principal",
-                "local_username": "${yarn-env/yarn_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/yarn.service.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "yarn-site/yarn.timeline-service.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
-              },
-              "keytab": {
-                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
-              }
-            },
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            }
-          ]
-        }
-      ]
-    },
-    {
-      "name": "MAPREDUCE2",
-      "identities": [
-        {
-          "name": "/spnego"
-        },
-        {
-          "name": "/smokeuser"
-        }
-      ],
-      "components": [
-        {
-          "name": "HISTORYSERVER",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            },
-            {
-              "name": "history_server_jhs",
-              "principal": {
-                "value": "jhs/_HOST@${realm}",
-                "type" : "service",
-                "configuration": "mapred-site/mapreduce.jobhistory.principal",
-                "local_username": "${mapred-env/mapred_user}"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/jhs.service.keytab",
-                "owner": {
-                  "name": "${mapred-env/mapred_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
-              }
-            },
-            {
-              "name": "/spnego",
-              "principal": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
-              },
-              "keytab": {
-                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
-              }
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json b/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
index 0ac9e78..f14eb52 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
@@ -402,7 +402,7 @@
             "yarn.log-aggregation-enable": "true", 
             "yarn.nodemanager.delete.debug-delay-sec": "0", 
             "yarn.timeline-service.store-class": "org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore", 
-            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "", 
             "yarn.timeline-service.client.retry-interval-ms": "1000", 
             "hadoop.registry.zk.quorum": "c6402.ambari.apache.org:2181,c6403.ambari.apache.org:2181,c6401.ambari.apache.org:2181", 
             "yarn.nodemanager.aux-services": "mapreduce_shuffle", 
@@ -424,7 +424,7 @@
             "yarn.nodemanager.resource.memory-mb": "2048", 
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "", 
             "yarn.nodemanager.resource.cpu-vcores": "1", 
-            "yarn.resourcemanager.proxyuser.*.users": "",
+            "yarn.resourcemanager.proxyusers.*.users": "", 
             "yarn.timeline-service.ttl-ms": "2678400000", 
             "yarn.nodemanager.resource.percentage-physical-cpu-limit": "100", 
             "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000", 
@@ -433,7 +433,7 @@
             "yarn.nodemanager.log.retain-seconds": "604800",
             "yarn.timeline-service.http-authentication.type": "kerberos", 
             "yarn.nodemanager.log-dirs": "/hadoop/yarn/log", 
-            "yarn.resourcemanager.proxyuser.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "", 
             "yarn.timeline-service.client.max-retries": "30", 
             "yarn.nodemanager.health-checker.interval-ms": "135000", 
             "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX", 
@@ -445,7 +445,7 @@
             "yarn.client.nodemanager-connect.max-wait-ms": "60000", 
             "yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true", 
             "yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000", 
-            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "", 
             "yarn.timeline-service.http-authentication.signer.secret.provider": "", 
             "yarn.resourcemanager.bind-host": "0.0.0.0", 
             "yarn.http.policy": "HTTP_ONLY", 
@@ -463,7 +463,7 @@
             "hadoop.registry.rm.enabled": "false", 
             "yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms": "300000", 
             "yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500", 
-            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "", 
             "yarn.nodemanager.log-aggregation.compression-type": "gz", 
             "yarn.timeline-service.http-authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
             "yarn.nodemanager.log-aggregation.num-log-files-per-app": "30", 
@@ -478,7 +478,7 @@
             "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore", 
             "yarn.resourcemanager.connect.retry-interval.ms": "30000", 
             "yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000", 
-            "yarn.resourcemanager.proxyuser.*.hosts": ""
+            "yarn.resourcemanager.proxyusers.*.hosts": ""
         }, 
         "capacity-scheduler": {
             "yarn.scheduler.capacity.default.minimum-user-limit-percent": "100", 


[41/50] [abbrv] ambari git commit: AMBARI-19581. Need to show proper error message in scenarios of failures with asset import (Madhan Mohan Reddy via gauravn7)

Posted by nc...@apache.org.
AMBARI-19581. Need to show proper error message in scenarios of failures with asset import (Madhan Mohan Reddy via gauravn7)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7d2388b3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7d2388b3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7d2388b3

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 7d2388b35b7c6b477244c7ed5e9a1f480e98ccf9
Parents: e0d78ed
Author: Gaurav Nagar <gr...@gmail.com>
Authored: Wed Jan 18 12:46:56 2017 +0530
Committer: Gaurav Nagar <gr...@gmail.com>
Committed: Wed Jan 18 12:46:56 2017 +0530

----------------------------------------------------------------------
 .../resources/ui/app/components/asset-list.js     | 15 ++++-----------
 .../resources/ui/app/components/flow-designer.js  | 18 +++++++++++++++---
 .../src/main/resources/ui/app/styles/app.less     |  5 +++++
 .../ui/app/templates/components/asset-config.hbs  |  3 +--
 .../ui/app/templates/components/asset-list.hbs    |  9 ++++++---
 .../ui/app/templates/components/asset-manager.hbs |  5 ++++-
 .../ui/app/templates/components/flow-designer.hbs |  8 ++++----
 7 files changed, 39 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7d2388b3/contrib/views/wfmanager/src/main/resources/ui/app/components/asset-list.js
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/asset-list.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/asset-list.js
index 2ea82b0..9ad0494 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/asset-list.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/asset-list.js
@@ -46,20 +46,9 @@ export default Ember.Component.extend({
     this.$('#asset_list_dialog').modal().on('hidden.bs.modal', function() {
       this.sendAction('showAssetList', false);
     }.bind(this));
-
-    this.$('#asset-list').on('click', 'tr', function(event) {
-      if(!$(this).hasClass('active-asset-row')) {
-        $(this).addClass('active-asset-row').siblings().removeClass('active-asset-row');
-      }
-      self.set('currentAssetId', $(this).data("assetId"));
-      self.set('assetNotSelected', false);
-    });
     this.initializeFuseSearch();
   }.on('didInsertElement'),
   initializeFuseSearch() {
-     // var fuse = new Fuse(this.get("assetList"), this.get('fuseSearchOptions'));
-     // this.set('fuse', fuse);
-     // this.set('filteredAssetList', fuse.search(this.get("assetSearchCriteria")));
      this.set('fuse', new Fuse(this.get("assetList"), this.get('fuseSearchOptions')));
      this.set('filteredAssetList', this.get("assetList"));
    },
@@ -77,6 +66,10 @@ export default Ember.Component.extend({
     importAsset() {
       this.$('#asset_list_dialog').modal('hide');
       this.sendAction('importAsset', this.get('assetList').filterBy('id', this.currentAssetId.toString())[0]);
+    },
+    selectAsset(assetId) {
+      this.set('currentAssetId', assetId);
+      this.set('assetNotSelected', false);
     }
   }
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/7d2388b3/contrib/views/wfmanager/src/main/resources/ui/app/components/flow-designer.js
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/flow-designer.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/flow-designer.js
index d0c05d6..8bbe831 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/flow-designer.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/flow-designer.js
@@ -352,13 +352,14 @@ export default Ember.Component.extend(FindNodeMixin, Validations, {
   importActionSettingsFromString(actionSettings) {
     var x2js = new X2JS();
     var actionSettingsObj = x2js.xml_str2json(actionSettings);
+    var actionSettingsObjType = Object.keys(actionSettingsObj)[0];
     var currentActionNode = this.flowRenderer.currentCyNode.data().node;
-    if (actionSettingsObj[currentActionNode.actionType]) {
+    if (actionSettingsObjType === currentActionNode.actionType) {
       var actionJobHandler = this.actionTypeResolver.getActionJobHandler(currentActionNode.actionType);
       actionJobHandler.handleImport(currentActionNode, actionSettingsObj[currentActionNode.actionType]);
       this.flowRenderer.hideOverlayNodeActions();
     } else {
-      this.set("errorMsg", "Invalid asset settings");
+      this.set("errorMsg", actionSettingsObjType + " action settings can't be imported to " + currentActionNode.actionType + " action");
     }
   },
   importActionNodeFromString(actionNodeXmlString) {
@@ -399,6 +400,7 @@ export default Ember.Component.extend(FindNodeMixin, Validations, {
   exportActionNodeXml() {
     var self = this;
     self.set("isAssetPublishing", true);
+    self.set("errorMsg", "");
     var workflowGenerator = WorkflowGenerator.create({workflow:this.get("workflow"), workflowContext:this.get('workflowContext')});
     var actionNodeXml = workflowGenerator.getActionNodeXml(this.flowRenderer.currentCyNode.data().name, this.flowRenderer.currentCyNode.data().node.actionType);
     var dynamicProperties = this.get('propertyExtractor').getDynamicProperties(actionNodeXml);
@@ -931,6 +933,7 @@ export default Ember.Component.extend(FindNodeMixin, Validations, {
       var self = this;
       this.set("showingActionSettingsFileBrowser", false);
       if(this.get('actionSettingsFilePath')){
+        self.set("errorMsg", "");
         var actionSettingsXmlDefered=this.getWorkflowFromHdfs(this.get('actionSettingsFilePath'));
         actionSettingsXmlDefered.promise.then(function(data){
           this.importActionSettingsFromString(data);
@@ -949,6 +952,7 @@ export default Ember.Component.extend(FindNodeMixin, Validations, {
       var self = this;
       this.set("showingImportActionNodeFileBrowser", false);
       if(this.get('actionNodeFilePath')){
+        self.set("errorMsg", "");
         var actionSettingsXmlDefered=this.getWorkflowFromHdfs(this.get('actionNodeFilePath'));
         actionSettingsXmlDefered.promise.then(function(data){
           this.importActionNodeFromString(data);
@@ -1077,6 +1081,7 @@ export default Ember.Component.extend(FindNodeMixin, Validations, {
     saveAssetConfig() {
       var self=this;
       self.set("isAssetPublishing", true);
+      self.set("errorMsg", "");
       var workflowGenerator = WorkflowGenerator.create({workflow:self.get("workflow"), workflowContext:self.get('workflowContext')});
       var actionNodeXml = workflowGenerator.getActionNodeXml(self.flowRenderer.currentCyNode.data().name, self.flowRenderer.currentCyNode.data().node.actionType);
       var dynamicProperties = self.get('propertyExtractor').getDynamicProperties(actionNodeXml);
@@ -1094,9 +1099,13 @@ export default Ember.Component.extend(FindNodeMixin, Validations, {
     showAssetList(value) {
       var self=this;
       if (value) {
+        self.set("errorMsg", "");
         var fetchAssetsDefered=self.get("assetManager").fetchAssets();
         fetchAssetsDefered.promise.then(function(response){
-          self.set('assetList', JSON.parse(response).data);
+          var assetData = JSON.parse(response).data;
+          assetData = assetData.filterBy('type', self.flowRenderer.currentCyNode.data().node.actionType);
+          self.set('assetList', assetData);
+          self.set('assetListType', self.flowRenderer.currentCyNode.data().node.actionType);
           self.set('showingAssetList', value);
         }.bind(this)).catch(function(data){
           self.set("errorMsg", "There is some problem while fetching assets. Please try again.");
@@ -1109,6 +1118,7 @@ export default Ember.Component.extend(FindNodeMixin, Validations, {
     importAsset(asset) {
       var self=this;
       self.set("isAssetImporting", true);
+      self.set("errorMsg", "");
       var importAssetDefered=self.get("assetManager").importAssetDefinition(asset.id);
       importAssetDefered.promise.then(function(response){
         var importedAsset = JSON.parse(response).data;
@@ -1123,6 +1133,7 @@ export default Ember.Component.extend(FindNodeMixin, Validations, {
     showAssetNodeList(value) {
       var self=this;
       if (value) {
+        self.set("errorMsg", "");
         var fetchAssetsDefered=self.get("assetManager").fetchAssets();
         fetchAssetsDefered.promise.then(function(response){
           self.set('assetList', JSON.parse(response).data);
@@ -1138,6 +1149,7 @@ export default Ember.Component.extend(FindNodeMixin, Validations, {
     importAssetNode(asset) {
       var self=this;
       self.set("isAssetImporting", true);
+      self.set("errorMsg", "");
       var importAssetDefered=self.get("assetManager").importAssetDefinition(asset.id);
       importAssetDefered.promise.then(function(response){
         var importedAsset = JSON.parse(response).data;
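
The core of this fix is the root-element check in importActionSettingsFromString: x2js exposes the XML root tag as the first key of the parsed object, and that key must equal the selected node's actionType before the settings are applied. A rough Python sketch of the same check, with illustrative names:

    import xml.etree.ElementTree as ET

    def check_action_settings(action_settings_xml, current_action_type):
        # The root element name plays the role of Object.keys(obj)[0] in the
        # JS code; a mismatch now yields a specific error message.
        imported_type = ET.fromstring(action_settings_xml).tag
        if imported_type == current_action_type:
            return None  # settings may be imported
        return "%s action settings can't be imported to %s action" % (
            imported_type, current_action_type)

    print(check_action_settings("<hive/>", "spark"))
    # -> hive action settings can't be imported to spark action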

http://git-wip-us.apache.org/repos/asf/ambari/blob/7d2388b3/contrib/views/wfmanager/src/main/resources/ui/app/styles/app.less
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/styles/app.less b/contrib/views/wfmanager/src/main/resources/ui/app/styles/app.less
index bc0e419..05bdb5a 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/styles/app.less
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/styles/app.less
@@ -1627,3 +1627,8 @@ input:invalid {
   width: 100%;
   min-height: 100px;
 }
+.no-asset-records {
+  text-align: center;
+  padding: 3px;
+  overflow-y: auto;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/7d2388b3/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/asset-config.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/asset-config.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/asset-config.hbs
index 4ee6d05..3ae4bff 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/asset-config.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/asset-config.hbs
@@ -22,12 +22,11 @@
         <button type="button" class="close" data-dismiss="modal" aria-label="Close" {{action 'close'}}>
           <span aria-hidden="true">&times;</span>
         </button>
-        <h4 class="modal-title">Add Asset</h4>
+        <h4 class="modal-title">Publish Asset</h4>
       </div>
       <div class="modal-body">
           <form class="form-horizontal">
             <div class="panel panel-default">
-              <!-- <div class="panel-heading">General</div> -->
               <div class="panel-body">
                 <div class="form-group">
                   <label for="assetName" class="control-label col-xs-2">Name</label>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7d2388b3/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/asset-list.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/asset-list.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/asset-list.hbs
index d377d2c..bc5201d 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/asset-list.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/asset-list.hbs
@@ -22,7 +22,7 @@
         <button type="button" class="close" data-dismiss="modal" aria-label="Close" {{action 'close'}}>
           <span aria-hidden="true">&times;</span>
         </button>
-        <h4 class="modal-title">Select Asset</h4>
+        <h4 class="modal-title">Import Asset</h4>
       </div>
       <div class="modal-body">
         {{input type="text" class="form-control marginBottom10" name="assetSearchCriteria" value=assetSearchCriteria placeholder="Asset Search"}}
@@ -38,11 +38,14 @@
             </thead>
           </table>
         </div>
-        <div class="panel panel-default asset-list-panel-body">
+        <div class="panel panel-default asset-list-panel-body {{if (eq filteredAssetList.length 0) 'no-asset-records'}}">
+          {{#if (eq filteredAssetList.length 0)}}
+            No {{#if (not-eq assetListType "")}} {{assetListType}}{{/if}} assets
+          {{/if}}
           <table id="asset-list" class="table asset-list listing table-striped table-hover table-bordered" cellspacing="0" width="100%">
             <tbody>
               {{#each filteredAssetList as |asset idx|}}
-                <tr data-asset-id="{{asset.id}}">
+                <tr {{action 'selectAsset' asset.id}} class="{{if (eq currentAssetId asset.id) "active-asset-row"}}">
                   <td class="col-xs-3">{{asset.name}}</td>
                   <td class="col-xs-3">{{asset.type}}</td>
                   <td class="col-xs-3">{{asset.description}}</td>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7d2388b3/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/asset-manager.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/asset-manager.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/asset-manager.hbs
index 0fa86b0..7a21af1 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/asset-manager.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/asset-manager.hbs
@@ -38,7 +38,10 @@
             </thead>
           </table>
         </div>
-        <div class="panel panel-default asset-list-panel-body">
+        <div class="panel panel-default asset-list-panel-body {{if (eq filteredAssetList.length 0) 'no-asset-records'}}">
+          {{#if (eq filteredAssetList.length 0)}}
+            <span>No assets</span>
+          {{/if}}
           <table id="asset-list" class="table asset-list listing table-striped table-hover table-bordered" cellspacing="0" width="100%">
             <tbody>
               {{#each filteredAssetList as |asset idx|}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/7d2388b3/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/flow-designer.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/flow-designer.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/flow-designer.hbs
index 80af968..95c8c3b 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/flow-designer.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/flow-designer.hbs
@@ -248,12 +248,12 @@
             <span class="overlay-asset-import-icon" title="Import Asset" {{action "showAssetList" true}}>
                 <i class="fa fa-download"></i>
             </span>
-            <span class="overlay-asset-export-icon" title="Publish Asset" {{action "showAssetConfig" true}}>
-                <i class="fa fa-upload"></i>
-            </span>
             <span class="overlay-hdfs-asset-import-icon" title="Import asset from HDFS" {{action "showActionSettingsFileBrowser"}}>
               <i class="fa fa-cloud-download"></i>
             </span>
+            <span class="overlay-asset-export-icon" title="Publish Asset" {{action "showAssetConfig" true}}>
+                <i class="fa fa-upload"></i>
+            </span>
             <span class="overlay-hdfs-asset-export-icon" title="Publish Asset to HDFS" {{action "showExportActionNodeFileBrowser"}}>
               <i class="fa fa-cloud-upload"></i>
             </span>
@@ -303,7 +303,7 @@
   {{#asset-config showAssetConfig="showAssetConfig" saveAssetConfig="saveAssetConfig" assetModel=assetConfig}}{{/asset-config}}
 {{/if}}
 {{#if showingAssetList}}
-  {{#asset-list showAssetList="showAssetList" importAsset="importAsset" assetList=assetList}}{{/asset-list}}
+  {{#asset-list showAssetList="showAssetList" importAsset="importAsset" assetList=assetList assetListType=assetListType}}{{/asset-list}}
 {{/if}}
 {{#if showingAssetNodeList}}
   {{#asset-list showAssetList="showAssetNodeList" importAsset="importAssetNode" deleteAsset="deleteAsset" assetList=assetList}}{{/asset-list}}


[14/50] [abbrv] ambari git commit: AMBARI-19044 Install & configure Ranger plugin components independently of Ranger admin components (mugdha)

Posted by nc...@apache.org.
AMBARI-19044 Install & configure Ranger plugin components independently of Ranger admin components (mugdha)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1524fd77
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1524fd77
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1524fd77

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 1524fd775d4b25d0896c648cb1bbc8ed3644a73d
Parents: 8b22dd0
Author: Mugdha Varadkar <mu...@apache.org>
Authored: Tue Jan 17 17:08:02 2017 +0530
Committer: Mugdha Varadkar <mu...@apache.org>
Committed: Tue Jan 17 18:16:38 2017 +0530

----------------------------------------------------------------------
 .../libraries/functions/constants.py            |   3 +
 .../functions/setup_ranger_plugin_xml.py        |  47 +++-
 .../server/upgrade/UpgradeCatalog250.java       |  37 ++++
 .../ATLAS/0.1.0.2.3/package/scripts/params.py   |  71 ++++--
 .../package/scripts/setup_ranger_atlas.py       |   4 +-
 .../0.96.0.2.0/package/scripts/params_linux.py  | 163 +++++++-------
 .../package/scripts/setup_ranger_hbase.py       |   4 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   | 166 +++++++-------
 .../package/scripts/setup_ranger_hdfs.py        |  44 ++--
 .../0.12.0.2.0/package/scripts/params_linux.py  | 161 +++++++-------
 .../package/scripts/setup_ranger_hive.py        |   6 +-
 .../KAFKA/0.8.1/package/scripts/params.py       | 126 +++++------
 .../0.8.1/package/scripts/setup_ranger_kafka.py |   4 +-
 .../0.9.0/configuration/ranger-kafka-audit.xml  |  32 +--
 .../ranger-kafka-plugin-properties.xml          |  14 +-
 .../ranger-kafka-policymgr-ssl.xml              |  12 +-
 .../configuration/ranger-kafka-security.xml     |  18 +-
 .../ranger-knox-plugin-properties.xml           |  12 +-
 .../0.5.0.2.2/package/scripts/params_linux.py   | 155 ++++++-------
 .../package/scripts/setup_ranger_knox.py        |   5 +-
 .../configuration/ranger-kms-security.xml       |   6 +
 .../0.10.0/configuration/ranger-storm-audit.xml |  32 +--
 .../ranger-storm-policymgr-ssl.xml              |  12 +-
 .../configuration/ranger-storm-security.xml     |  18 +-
 .../STORM/0.9.1/package/scripts/params_linux.py | 161 +++++++-------
 .../0.9.1/package/scripts/setup_ranger_storm.py |   4 +-
 .../ranger-storm-plugin-properties.xml          |  71 ++++++
 .../2.1.0.2.0/package/scripts/params_linux.py   | 215 ++++++++++---------
 .../package/scripts/resourcemanager.py          |   2 +-
 .../package/scripts/setup_ranger_yarn.py        |   4 +-
 .../HDP/2.0.6/properties/stack_features.json    |  17 +-
 .../ranger-hbase-plugin-properties.xml          |  10 +-
 .../ranger-hdfs-plugin-properties.xml           |  12 +-
 .../ranger-hive-plugin-properties.xml           |  10 +-
 .../ranger-knox-plugin-properties.xml           |   2 +-
 .../stacks/HDP/2.2/services/stack_advisor.py    |  38 ++--
 .../HBASE/configuration/ranger-hbase-audit.xml  |  32 +--
 .../ranger-hbase-policymgr-ssl.xml              |  12 +-
 .../configuration/ranger-hbase-security.xml     |  20 +-
 .../configuration/ranger-hdfs-policymgr-ssl.xml |  12 +-
 .../HDFS/configuration/ranger-hdfs-security.xml |  20 +-
 .../HIVE/configuration/ranger-hive-audit.xml    |  32 +--
 .../configuration/ranger-hive-policymgr-ssl.xml |  12 +-
 .../HIVE/configuration/ranger-hive-security.xml |  20 +-
 .../ranger-kafka-policymgr-ssl.xml              |   4 +-
 .../KNOX/configuration/ranger-knox-audit.xml    |  32 +--
 .../configuration/ranger-knox-policymgr-ssl.xml |  12 +-
 .../KNOX/configuration/ranger-knox-security.xml |  18 +-
 .../ranger-storm-policymgr-ssl.xml              |   4 +-
 .../configuration/ranger-storm-security.xml     |   2 +-
 .../YARN/configuration/ranger-yarn-audit.xml    |  32 +--
 .../ranger-yarn-plugin-properties.xml           |  12 +-
 .../configuration/ranger-yarn-policymgr-ssl.xml |  12 +-
 .../YARN/configuration/ranger-yarn-security.xml |  18 +-
 .../stacks/HDP/2.3/services/stack_advisor.py    |  34 +++
 .../ATLAS/configuration/ranger-atlas-audit.xml  |   6 +-
 .../ranger-atlas-plugin-properties.xml          |  58 ++++-
 .../ranger-atlas-policymgr-ssl.xml              |  12 +-
 .../configuration/ranger-atlas-security.xml     |  20 +-
 .../ranger-hbase-plugin-properties.xml          |  71 ++++++
 .../ranger-hdfs-plugin-properties.xml           |  50 ++++-
 .../ranger-hive-plugin-properties.xml           |  71 ++++++
 .../HIVE/configuration/ranger-hive-security.xml |   2 +-
 .../ranger-kafka-plugin-properties.xml          |  71 ++++++
 .../ranger-knox-plugin-properties.xml           |  71 ++++++
 .../ranger-storm-policymgr-ssl.xml              |   4 +-
 .../configuration/ranger-storm-security.xml     |   2 +-
 .../ranger-yarn-plugin-properties.xml           |  71 ++++++
 .../stacks/HDP/2.5/services/stack_advisor.py    |   7 +
 .../server/upgrade/UpgradeCatalog250Test.java   | 110 ++++++++++
 .../stacks/2.0.6/configs/altfs_plus_hdfs.json   |   6 +-
 .../python/stacks/2.0.6/configs/default.json    |  10 +-
 .../stacks/2.0.6/configs/default_client.json    |   3 +-
 .../2.0.6/configs/default_hive_nn_ha.json       |   3 +-
 .../2.0.6/configs/default_hive_nn_ha_2.json     |   3 +-
 .../2.0.6/configs/default_hive_non_hdfs.json    |   3 +-
 .../2.0.6/configs/default_no_install.json       |   3 +-
 .../2.0.6/configs/default_with_bucket.json      |   4 +-
 .../2.0.6/configs/ha_bootstrap_active_node.json |   2 +-
 .../configs/ha_bootstrap_standby_node.json      |   2 +-
 ...ha_bootstrap_standby_node_initial_start.json |   2 +-
 ...dby_node_initial_start_dfs_nameservices.json |   2 +-
 .../python/stacks/2.0.6/configs/ha_default.json |   4 +-
 .../python/stacks/2.0.6/configs/ha_secured.json |   2 +-
 .../python/stacks/2.0.6/configs/hbase-2.2.json  |   4 +-
 .../2.0.6/configs/hbase-rs-2.2-phoenix.json     |   4 +-
 .../stacks/2.0.6/configs/hbase-rs-2.2.json      |   4 +-
 .../python/stacks/2.0.6/configs/nn_ru_lzo.json  |   2 +-
 .../python/stacks/2.0.6/configs/secured.json    |  12 +-
 .../stacks/2.0.6/configs/secured_client.json    |   3 +-
 .../stacks/2.1/configs/default-storm-start.json |   2 +-
 .../test/python/stacks/2.1/configs/default.json |   5 +-
 .../stacks/2.1/configs/secured-storm-start.json |   2 +-
 .../test/python/stacks/2.1/configs/secured.json |   5 +-
 .../stacks/2.2/common/test_stack_advisor.py     |  53 ++++-
 .../test/python/stacks/2.2/configs/default.json |   6 +-
 .../python/stacks/2.2/configs/hive-upgrade.json |   3 +-
 .../stacks/2.3/common/test_stack_advisor.py     |   3 +-
 .../python/stacks/2.5/configs/hsi_default.json  |   3 +-
 .../test/python/stacks/2.5/configs/hsi_ha.json  |   3 +-
 .../controllers/main/service/info/configs.js    |   4 +-
 .../app/controllers/wizard/step7_controller.js  |   4 +-
 102 files changed, 1889 insertions(+), 946 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-common/src/main/python/resource_management/libraries/functions/constants.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/constants.py b/ambari-common/src/main/python/resource_management/libraries/functions/constants.py
index 56af615..6895e34 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/constants.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/constants.py
@@ -106,6 +106,9 @@ class StackFeature:
   ZKFC_VERSION_ADVERTISED = "zkfc_version_advertised"
   PHOENIX_CORE_HDFS_SITE_REQUIRED = "phoenix_core_hdfs_site_required"
   RANGER_TAGSYNC_SSL_XML_SUPPORT="ranger_tagsync_ssl_xml_support"
+  RANGER_XML_CONFIGURATION = "ranger_xml_configuration"
+  KAFKA_RANGER_PLUGIN_SUPPORT = "kafka_ranger_plugin_support"
+  YARN_RANGER_PLUGIN_SUPPORT = "yarn_ranger_plugin_support"
   RANGER_SOLR_CONFIG_SUPPORT='ranger_solr_config_support'
   HIVE_INTERACTIVE_ATLAS_HOOK_REQUIRED="hive_interactive_atlas_hook_required"
   CORE_SITE_FOR_RANGER_PLUGINS_SUPPORT='core_site_for_ranger_plugins'
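
The three new StackFeature constants are consulted through check_stack_feature, with the minimum stack versions declared in stack_features.json (also touched by this commit). A simplified standalone sketch of that gating; the version table below is made up for illustration:

    # Stand-in for resource_management's check_stack_feature / StackFeature;
    # real minimum versions come from HDP/2.0.6/properties/stack_features.json.
    FEATURE_MIN_VERSION = {
        "ranger_xml_configuration": "2.3",
        "kafka_ranger_plugin_support": "2.3",
        "yarn_ranger_plugin_support": "2.3",
    }

    def _parse(version):
        return tuple(int(part) for part in version.split("."))

    def check_stack_feature(feature, stack_version):
        minimum = FEATURE_MIN_VERSION.get(feature)
        return minimum is not None and _parse(stack_version) >= _parse(minimum)

    print(check_stack_feature("ranger_xml_configuration", "2.5"))  # True here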

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
index 6561928..a12116d 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
@@ -17,8 +17,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
-__all__ = ["setup_ranger_plugin"]
-
+__all__ = ["setup_ranger_plugin", "get_audit_configs"]
 
 import os
 import ambari_simplejson as json
@@ -34,6 +33,7 @@ from resource_management.libraries.functions.ranger_functions_v2 import Rangerad
 from resource_management.core.utils import PasswordString
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.default import default
 
 def setup_ranger_plugin(component_select_name, service_name, previous_jdbc_jar,
                         component_downloaded_custom_connector, component_driver_curl_source,
@@ -164,8 +164,8 @@ def setup_ranger_plugin(component_select_name, service_name, previous_jdbc_jar,
         group = component_group,
         mode=0744) 
 
-    #This should be done by rpm
-    #setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list)
+    # creating symblink should be done by rpm package
+    # setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list)
 
     setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, stack_version, credential_file,
               xa_audit_db_password, ssl_truststore_password, ssl_keystore_password,
@@ -176,7 +176,6 @@ def setup_ranger_plugin(component_select_name, service_name, previous_jdbc_jar,
       action="delete"      
     )    
 
-
 def setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list):
 
   stack_root = Script.get_stack_root()
@@ -217,7 +216,6 @@ def setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, stack_versio
     mode = 0640
   )
 
-
 def setup_core_site_for_required_plugins(component_user, component_group, create_core_site_path, config):
   XmlConfig('core-site.xml',
     conf_dir=create_core_site_path,
@@ -227,3 +225,40 @@ def setup_core_site_for_required_plugins(component_user, component_group, create
     group=component_group,
     mode=0644
   )
+
+def get_audit_configs(config):
+  xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR'].lower()
+  xa_db_host = config['configurations']['admin-properties']['db_host']
+  xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
+
+  if xa_audit_db_flavor == 'mysql':
+    jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
+    previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
+    audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
+    jdbc_driver = "com.mysql.jdbc.Driver"
+  elif xa_audit_db_flavor == 'oracle':
+    jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
+    previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
+    colon_count = xa_db_host.count(':')
+    if colon_count == 2 or colon_count == 0:
+      audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
+    else:
+      audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
+    jdbc_driver = "oracle.jdbc.OracleDriver"
+  elif xa_audit_db_flavor == 'postgres':
+    jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
+    previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
+    audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
+    jdbc_driver = "org.postgresql.Driver"
+  elif xa_audit_db_flavor == 'mssql':
+    jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
+    previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
+    audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
+    jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
+  elif xa_audit_db_flavor == 'sqla':
+    jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
+    previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
+    audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
+    jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
+
+  return jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver
\ No newline at end of file
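
get_audit_configs centralizes the per-flavor JDBC settings that each params_linux.py previously duplicated (the HBASE hunk below shows one such deletion). Two details worth noting: the Oracle URL additionally depends on how many colons the host string contains, and an unrecognized DB_FLAVOR would leave the returned names unbound. A standalone sketch of the same mapping with placeholder hosts, Oracle elided:

    # Simplified flavor -> (driver, JDBC URL template) table mirroring
    # get_audit_configs; jar-name resolution from /hostLevelParams is omitted.
    AUDIT_JDBC = {
        "mysql": ("com.mysql.jdbc.Driver", "jdbc:mysql://{host}/{db}"),
        "postgres": ("org.postgresql.Driver", "jdbc:postgresql://{host}/{db}"),
        "mssql": ("com.microsoft.sqlserver.jdbc.SQLServerDriver",
                  "jdbc:sqlserver://{host};databaseName={db}"),
        "sqla": ("sap.jdbc4.sqlanywhere.IDriver",
                 "jdbc:sqlanywhere:database={db};host={host}"),
    }

    def audit_jdbc_settings(flavor, host, db="ranger_audits"):
        try:
            driver, url_template = AUDIT_JDBC[flavor.lower()]
        except KeyError:
            raise ValueError("unsupported audit DB flavor: %s" % flavor)
        return driver, url_template.format(host=host, db=db)

    print(audit_jdbc_settings("mysql", "c6401.ambari.apache.org"))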

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
index 29e1f17..6638379 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
@@ -166,6 +166,7 @@ public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
     updateLogSearchConfigs();
     updateAmbariInfraConfigs();
     updateYarnSite();
+    updateRangerUrlConfigs();
     addManageServiceAutoStartPermissions();
   }
 
@@ -881,4 +882,40 @@ public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
         "CLUSTER.OPERATOR:CLUSTER");
     addRoleAuthorization("CLUSTER.MANAGE_AUTO_START", "Manage service auto-start configuration", roles);
   }
+
+  /**
+   * Updates Ranger admin url for Ranger plugin supported configs.
+   *
+   * @throws AmbariException
+   */
+  protected void updateRangerUrlConfigs() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
+
+      Config ranger_admin_properties = cluster.getDesiredConfigByType("admin-properties");
+      if(null != ranger_admin_properties) {
+        String policyUrl = ranger_admin_properties.getProperties().get("policymgr_external_url");
+        if (null != policyUrl) {
+          updateRangerUrl(cluster, "ranger-hdfs-security", "ranger.plugin.hdfs.policy.rest.url", policyUrl);
+          updateRangerUrl(cluster, "ranger-hive-security", "ranger.plugin.hive.policy.rest.url", policyUrl);
+          updateRangerUrl(cluster, "ranger-hbase-security", "ranger.plugin.hbase.policy.rest.url", policyUrl);
+          updateRangerUrl(cluster, "ranger-knox-security", "ranger.plugin.knox.policy.rest.url", policyUrl);
+          updateRangerUrl(cluster, "ranger-storm-security", "ranger.plugin.storm.policy.rest.url", policyUrl);
+          updateRangerUrl(cluster, "ranger-yarn-security", "ranger.plugin.yarn.policy.rest.url", policyUrl);
+          updateRangerUrl(cluster, "ranger-kafka-security", "ranger.plugin.kafka.policy.rest.url", policyUrl);
+          updateRangerUrl(cluster, "ranger-atlas-security", "ranger.plugin.atlas.policy.rest.url", policyUrl);
+          updateRangerUrl(cluster, "ranger-kms-security", "ranger.plugin.kms.policy.rest.url", policyUrl);
+        }
+      }
+    }
+  }
+
+  protected void updateRangerUrl(Cluster cluster, String configType, String configProperty, String policyUrl) throws AmbariException {
+    Config componentSecurity = cluster.getDesiredConfigByType(configType);
+    if(componentSecurity != null && componentSecurity.getProperties().containsKey(configProperty)) {
+      Map<String, String> updateProperty = new HashMap<>();
+      updateProperty.put(configProperty, policyUrl);
+      updateConfigurationPropertiesForCluster(cluster, configType, updateProperty, true, false);
+    }
+  }
 }
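
updateRangerUrlConfigs copies admin-properties/policymgr_external_url into each plugin's *-security config, but only where that config type already defines its policy.rest.url property. The same idea, sketched in Python purely for illustration:

    # Config-type -> property mapping; the remaining plugin types from the
    # Java method (knox, storm, yarn, kafka, atlas, kms) are elided here.
    PLUGIN_URL_PROPS = {
        "ranger-hdfs-security": "ranger.plugin.hdfs.policy.rest.url",
        "ranger-hive-security": "ranger.plugin.hive.policy.rest.url",
        "ranger-hbase-security": "ranger.plugin.hbase.policy.rest.url",
    }

    def update_ranger_urls(cluster_configs):
        admin = cluster_configs.get("admin-properties", {})
        policy_url = admin.get("policymgr_external_url")
        if policy_url is None:
            return
        for config_type, prop in PLUGIN_URL_PROPS.items():
            props = cluster_configs.get(config_type)
            if props is not None and prop in props:  # update only if present
                props[prop] = policy_url

    configs = {"admin-properties": {"policymgr_external_url": "http://rgr:6080"},
               "ranger-hdfs-security": {"ranger.plugin.hdfs.policy.rest.url": ""}}
    update_ranger_urls(configs)
    print(configs["ranger-hdfs-security"])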

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
index 94193be..c74d046 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
@@ -219,17 +219,7 @@ for host in zookeeper_hosts:
   if index < len(zookeeper_hosts):
     zookeeper_quorum += ","
 
-
-# Atlas Ranger plugin configurations
-stack_supports_atlas_ranger_plugin = check_stack_feature(StackFeature.ATLAS_RANGER_PLUGIN_SUPPORT, version_for_stack_feature_checks)
-stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
 stack_supports_atlas_hdfs_site_on_namenode_ha = check_stack_feature(StackFeature.ATLAS_HDFS_SITE_ON_NAMENODE_HA, version_for_stack_feature_checks)
-retry_enabled = default("/commandParams/command_retry_enabled", False)
-
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
-enable_ranger_atlas = False
 
 atlas_server_xmx = default("configurations/atlas-env/atlas_server_xmx", 2048)
 atlas_server_max_new_size = default("configurations/atlas-env/atlas_server_max_new_size", 614)
@@ -237,9 +227,6 @@ atlas_server_max_new_size = default("configurations/atlas-env/atlas_server_max_n
 hbase_master_hosts = default('/clusterHostInfo/hbase_master_hosts', [])
 has_hbase_master = not len(hbase_master_hosts) == 0
 
-ranger_admin_hosts = default('/clusterHostInfo/ranger_admin_hosts', [])
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-
 atlas_hbase_setup = format("{exec_tmp_dir}/atlas_hbase_setup.rb")
 atlas_kafka_setup = format("{exec_tmp_dir}/atlas_kafka_acl.sh")
 atlas_graph_storage_hbase_table = default('/configurations/application-properties/atlas.graph.storage.hbase.table', None)
@@ -247,7 +234,6 @@ atlas_audit_hbase_tablename = default('/configurations/application-properties/at
 
 hbase_user_keytab = default('/configurations/hbase-env/hbase_user_keytab', None)
 hbase_principal_name = default('/configurations/hbase-env/hbase_principal_name', None)
-enable_ranger_hbase = False
 
 # ToDo: Kafka port to Atlas
 # Used while upgrading the stack in a kerberized cluster and running kafka-acls.sh
@@ -289,7 +275,29 @@ if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, version_for_stack_fea
 namenode_host = set(default("/clusterHostInfo/namenode_host", []))
 has_namenode = not len(namenode_host) == 0
 
-if has_ranger_admin and stack_supports_atlas_ranger_plugin:
+# ranger altas plugin section start
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+
+retry_enabled = default("/commandParams/command_retry_enabled", False)
+
+stack_supports_atlas_ranger_plugin = check_stack_feature(StackFeature.ATLAS_RANGER_PLUGIN_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
+
+# ranger xml_configuration support flag: use the stack feature check instead of the xml_configurations_supported property from ranger-env
+xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
+
+# ranger atlas plugin enabled property
+enable_ranger_atlas = default("/configurations/ranger-atlas-plugin-properties/ranger-atlas-plugin-enabled", "No")
+enable_ranger_atlas = True if enable_ranger_atlas.lower() == "yes" else False
+
+# ranger hbase plugin enabled property
+enable_ranger_hbase = default("/configurations/ranger-hbase-plugin-properties/ranger-hbase-plugin-enabled", "No")
+enable_ranger_hbase = True if enable_ranger_hbase.lower() == 'yes' else False
+
+if stack_supports_atlas_ranger_plugin and enable_ranger_atlas:
   # for create_hdfs_directory
   hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] if has_namenode else None
   hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']  if has_namenode else None
@@ -320,27 +328,42 @@ if has_ranger_admin and stack_supports_atlas_ranger_plugin:
     dfs_type = dfs_type
   )
 
+  # ranger atlas service/repository name
   repo_name = str(config['clusterName']) + '_atlas'
   repo_name_value = config['configurations']['ranger-atlas-security']['ranger.plugin.atlas.service.name']
   if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
     repo_name = repo_name_value
-  ssl_keystore_password = unicode(config['configurations']['ranger-atlas-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'])
-  ssl_truststore_password = unicode(config['configurations']['ranger-atlas-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'])
+
+  ssl_keystore_password = config['configurations']['ranger-atlas-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']
+  ssl_truststore_password = config['configurations']['ranger-atlas-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']
   credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
   xa_audit_hdfs_is_enabled = default('/configurations/ranger-atlas-audit/xasecure.audit.destination.hdfs', False)
-  enable_ranger_atlas = config['configurations']['ranger-atlas-plugin-properties']['ranger-atlas-plugin-enabled']
-  enable_ranger_atlas = not is_empty(enable_ranger_atlas) and enable_ranger_atlas.lower() == 'yes'
-  enable_ranger_hbase = config['configurations']['ranger-hbase-plugin-properties']['ranger-hbase-plugin-enabled']
-  enable_ranger_hbase = not is_empty(enable_ranger_hbase) and enable_ranger_hbase.lower() == 'yes'
-  policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+
+  # get ranger policy url
+  policymgr_mgr_url = config['configurations']['ranger-atlas-security']['ranger.plugin.atlas.policy.rest.url']
+
+  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
+    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
 
   downloaded_custom_connector = None
   driver_curl_source = None
   driver_curl_target = None
 
   ranger_env = config['configurations']['ranger-env']
-  ranger_plugin_properties = config['configurations']['ranger-atlas-plugin-properties']
 
+  # create ranger-env config having external ranger credential properties
+  if not has_ranger_admin and enable_ranger_atlas:
+    external_admin_username = default('/configurations/ranger-atlas-plugin-properties/external_admin_username', 'admin')
+    external_admin_password = default('/configurations/ranger-atlas-plugin-properties/external_admin_password', 'admin')
+    external_ranger_admin_username = default('/configurations/ranger-atlas-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
+    external_ranger_admin_password = default('/configurations/ranger-atlas-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
+    ranger_env = {}
+    ranger_env['admin_username'] = external_admin_username
+    ranger_env['admin_password'] = external_admin_password
+    ranger_env['ranger_admin_username'] = external_ranger_admin_username
+    ranger_env['ranger_admin_password'] = external_ranger_admin_password
+
+  ranger_plugin_properties = config['configurations']['ranger-atlas-plugin-properties']
   ranger_atlas_audit = config['configurations']['ranger-atlas-audit']
   ranger_atlas_audit_attrs = config['configuration_attributes']['ranger-atlas-audit']
   ranger_atlas_security = config['configurations']['ranger-atlas-security']
@@ -357,6 +380,7 @@ if has_ranger_admin and stack_supports_atlas_ranger_plugin:
     'commonNameForCertificate' : config['configurations']['ranger-atlas-plugin-properties']['common.name.for.certificate'],
     'ambari.service.check.user' : policy_user
   }
+
   if security_enabled:
     atlas_repository_configuration['policy.download.auth.users'] = metadata_user
     atlas_repository_configuration['tag.download.auth.users'] = metadata_user
@@ -368,3 +392,4 @@ if has_ranger_admin and stack_supports_atlas_ranger_plugin:
     'name': repo_name,
     'type': 'atlas',
     }
+# ranger atlas plugin section end
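
Two idioms recur across the reworked params files. Plugin enablement now comes from the ranger-<service>-plugin-enabled property ("Yes"/"No" strings, defaulting to "No"), so a plugin can be driven without a local Ranger admin; and when no ranger_admin_hosts exist, a stand-in ranger_env dict is assembled from the external_* properties. A minimal sketch of both, with defaults matching the diff:

    def plugin_enabled(configurations, config_type, prop):
        # "Yes"/"No" string flag, defaulting to disabled.
        value = configurations.get(config_type, {}).get(prop, "No")
        return str(value).lower() == "yes"

    def external_ranger_env(plugin_props):
        # Built only when no local Ranger admin host is present.
        return {
            "admin_username": plugin_props.get("external_admin_username", "admin"),
            "admin_password": plugin_props.get("external_admin_password", "admin"),
            "ranger_admin_username": plugin_props.get(
                "external_ranger_admin_username", "amb_ranger_admin"),
            "ranger_admin_password": plugin_props.get(
                "external_ranger_admin_password", "amb_ranger_admin"),
        }

    cfg = {"ranger-atlas-plugin-properties": {"ranger-atlas-plugin-enabled": "Yes"}}
    print(plugin_enabled(cfg, "ranger-atlas-plugin-properties",
                         "ranger-atlas-plugin-enabled"))  # True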

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/setup_ranger_atlas.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/setup_ranger_atlas.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/setup_ranger_atlas.py
index f5d7f38..c47c75c 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/setup_ranger_atlas.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/setup_ranger_atlas.py
@@ -19,7 +19,7 @@ from resource_management.core.logger import Logger
 def setup_ranger_atlas(upgrade_type=None):
   import params
 
-  if params.has_ranger_admin:
+  if params.enable_ranger_atlas:
 
     from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
 
@@ -67,4 +67,4 @@ def setup_ranger_atlas(upgrade_type=None):
                         component_user_principal=params.atlas_jaas_principal if params.security_enabled else None,
                         component_user_keytab=params.atlas_keytab_path if params.security_enabled else None)
   else:
-    Logger.info('Ranger admin not installed')
\ No newline at end of file
+    Logger.info('Ranger Atlas plugin is not enabled')
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index e27fd72..268d81c 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -41,6 +41,7 @@ from resource_management.libraries.functions.get_not_managed_resources import ge
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions.expect import expect
 from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
+from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs
 
 # server configurations
 config = Script.get_config()
@@ -232,8 +233,6 @@ hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
 
-
-
 hdfs_site = config['configurations']['hdfs-site']
 default_fs = config['configurations']['core-site']['fs.defaultFS']
 
@@ -258,87 +257,90 @@ HdfsResource = functools.partial(
   dfs_type = dfs_type
 )
 
-# ranger host
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-
-# ranger hbase properties
-policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
-if 'admin-properties' in config['configurations'] and 'policymgr_external_url' in config['configurations']['admin-properties'] and policymgr_mgr_url.endswith('/'):
-  policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
-xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
-xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
-xa_db_host = config['configurations']['admin-properties']['db_host']
-repo_name = str(config['clusterName']) + '_hbase'
-repo_name_value = config['configurations']['ranger-hbase-security']['ranger.plugin.hbase.service.name']
-if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
-  repo_name = repo_name_value
-
-common_name_for_certificate = config['configurations']['ranger-hbase-plugin-properties']['common.name.for.certificate']
-
 zookeeper_znode_parent = config['configurations']['hbase-site']['zookeeper.znode.parent']
 hbase_zookeeper_quorum = config['configurations']['hbase-site']['hbase.zookeeper.quorum']
 hbase_zookeeper_property_clientPort = config['configurations']['hbase-site']['hbase.zookeeper.property.clientPort']
 hbase_security_authentication = config['configurations']['hbase-site']['hbase.security.authentication']
 hadoop_security_authentication = config['configurations']['core-site']['hadoop.security.authentication']
 
-repo_config_username = config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+# ranger hbase plugin section start
 
-ranger_env = config['configurations']['ranger-env']
-ranger_plugin_properties = config['configurations']['ranger-hbase-plugin-properties']
-policy_user = config['configurations']['ranger-hbase-plugin-properties']['policy_user']
-
-#For curl command in ranger plugin to get db connector
+# to get db connector jar
 jdk_location = config['hostLevelParams']['jdk_location']
-java_share_dir = '/usr/share/java'
-enable_ranger_hbase = False
-if has_ranger_admin:
-  enable_ranger_hbase = (config['configurations']['ranger-hbase-plugin-properties']['ranger-hbase-plugin-enabled'].lower() == 'yes')
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+
+# ranger xml_configuration support flag: use the stack feature check instead of the xml_configurations_supported property from ranger-env
+xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
+
+# ambari-server hostname
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+# ranger hbase plugin enabled property
+enable_ranger_hbase = default("/configurations/ranger-hbase-plugin-properties/ranger-hbase-plugin-enabled", "No")
+enable_ranger_hbase = True if enable_ranger_hbase.lower() == 'yes' else False
+
+# ranger hbase properties
+if enable_ranger_hbase:
+  # get ranger policy url
+  policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+  if xml_configurations_supported:
+    policymgr_mgr_url = config['configurations']['ranger-hbase-security']['ranger.plugin.hbase.policy.rest.url']
+
+  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
+    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+
+  # ranger audit db user
+  xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+
+  # ranger hbase service/repository name
+  repo_name = str(config['clusterName']) + '_hbase'
+  repo_name_value = config['configurations']['ranger-hbase-security']['ranger.plugin.hbase.service.name']
+  if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+    repo_name = repo_name_value
+
+  common_name_for_certificate = config['configurations']['ranger-hbase-plugin-properties']['common.name.for.certificate']
+  repo_config_username = config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+  ranger_plugin_properties = config['configurations']['ranger-hbase-plugin-properties']
+  policy_user = config['configurations']['ranger-hbase-plugin-properties']['policy_user']
+  repo_config_password = config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
+
+  # ranger-env config
+  ranger_env = config['configurations']['ranger-env']
+
+  # create ranger-env config having external ranger credential properties
+  if not has_ranger_admin and enable_ranger_hbase:
+    external_admin_username = default('/configurations/ranger-hbase-plugin-properties/external_admin_username', 'admin')
+    external_admin_password = default('/configurations/ranger-hbase-plugin-properties/external_admin_password', 'admin')
+    external_ranger_admin_username = default('/configurations/ranger-hbase-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
+    external_ranger_admin_password = default('/configurations/ranger-hbase-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
+    ranger_env = {}
+    ranger_env['admin_username'] = external_admin_username
+    ranger_env['admin_password'] = external_admin_password
+    ranger_env['ranger_admin_username'] = external_ranger_admin_username
+    ranger_env['ranger_admin_password'] = external_ranger_admin_password
+
   xa_audit_db_password = ''
-  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
-    xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
-  repo_config_password = unicode(config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'])
-  xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
+  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
+    xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
+
+  downloaded_custom_connector = None
   previous_jdbc_jar_name = None
+  driver_curl_source = None
+  driver_curl_target = None
+  previous_jdbc_jar = None
+
+  if has_ranger_admin and stack_supports_ranger_audit_db:
+    xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
+    jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
 
-  if stack_supports_ranger_audit_db:
-    if xa_audit_db_flavor == 'mysql':
-      jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
-      jdbc_driver = "com.mysql.jdbc.Driver"
-    elif xa_audit_db_flavor == 'oracle':
-      jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
-      colon_count = xa_db_host.count(':')
-      if colon_count == 2 or colon_count == 0:
-        audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
-      else:
-        audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
-      jdbc_driver = "oracle.jdbc.OracleDriver"
-    elif xa_audit_db_flavor == 'postgres':
-      jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
-      jdbc_driver = "org.postgresql.Driver"
-    elif xa_audit_db_flavor == 'mssql':
-      jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
-      jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
-    elif xa_audit_db_flavor == 'sqla':
-      jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
-      jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
-
-  downloaded_custom_connector = format("{exec_tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  driver_curl_target = format("{stack_root}/current/{component_directory}/lib/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  previous_jdbc_jar = format("{stack_root}/current/{component_directory}/lib/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  sql_connector_jar = ''
+    downloaded_custom_connector = format("{exec_tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_target = format("{stack_root}/current/{component_directory}/lib/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    previous_jdbc_jar = format("{stack_root}/current/{component_directory}/lib/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    sql_connector_jar = ''
 
   if security_enabled:
     master_principal = config['configurations']['hbase-site']['hbase.master.kerberos.principal']
@@ -385,23 +387,24 @@ if has_ranger_admin:
   if stack_supports_ranger_kerberos and security_enabled and 'hbase-master' in component_directory.lower():
     ranger_hbase_principal = master_jaas_princ
     ranger_hbase_keytab = master_keytab_path
-  elif  stack_supports_ranger_kerberos and security_enabled and 'hbase-regionserver' in component_directory.lower():
+  elif stack_supports_ranger_kerberos and security_enabled and 'hbase-regionserver' in component_directory.lower():
     ranger_hbase_principal = regionserver_jaas_princ
     ranger_hbase_keytab = regionserver_keytab_path
 
   xa_audit_db_is_enabled = False
-  ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
   if xml_configurations_supported and stack_supports_ranger_audit_db:
     xa_audit_db_is_enabled = config['configurations']['ranger-hbase-audit']['xasecure.audit.destination.db']
-  xa_audit_hdfs_is_enabled = config['configurations']['ranger-hbase-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
-  ssl_keystore_password = unicode(config['configurations']['ranger-hbase-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
-  ssl_truststore_password = unicode(config['configurations']['ranger-hbase-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
-  credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
 
-  #For SQLA explicitly disable audit to DB for Ranger
-  if xa_audit_db_flavor == 'sqla':
+  xa_audit_hdfs_is_enabled = config['configurations']['ranger-hbase-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else False
+  ssl_keystore_password = config['configurations']['ranger-hbase-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
+  ssl_truststore_password = config['configurations']['ranger-hbase-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
+
+  # for SQLA explicitly disable audit to DB for Ranger
+  if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor.lower() == 'sqla':
     xa_audit_db_is_enabled = False
 
+# ranger hbase plugin section end
 
 create_hbase_home_directory = check_stack_feature(StackFeature.HBASE_HOME_DIRECTORY, stack_version_formatted)
 hbase_home_directory = format("/user/{hbase_user}")

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py
index 0d73e39..d32dce1 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py
@@ -22,7 +22,7 @@ from resource_management.core.logger import Logger
 def setup_ranger_hbase(upgrade_type=None, service_name="hbase-master"):
   import params
 
-  if params.has_ranger_admin:
+  if params.enable_ranger_hbase:
 
     stack_version = None
 
@@ -103,4 +103,4 @@ def setup_ranger_hbase(upgrade_type=None, service_name="hbase-master"):
                         ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
                         stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
   else:
-    Logger.info('Ranger admin not installed')
+    Logger.info('Ranger HBase plugin is not enabled')

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index 21e7b68..31431b9 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -44,7 +44,7 @@ from resource_management.libraries.functions.get_lzo_packages import get_lzo_pac
 from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
 from resource_management.libraries.functions import is_empty
 from resource_management.libraries.functions.get_architecture import get_architecture
-
+from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs
 
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
@@ -392,95 +392,100 @@ dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
 mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
 
-# ranger host
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-
-#ranger hdfs properties
-policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
-if 'admin-properties' in config['configurations'] and 'policymgr_external_url' in config['configurations']['admin-properties'] and policymgr_mgr_url.endswith('/'):
-  policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
-xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
-xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
-xa_db_host = config['configurations']['admin-properties']['db_host']
-repo_name = str(config['clusterName']) + '_hadoop'
-repo_name_value = config['configurations']['ranger-hdfs-security']['ranger.plugin.hdfs.service.name']
-if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
-  repo_name = repo_name_value
-
 hadoop_security_authentication = config['configurations']['core-site']['hadoop.security.authentication']
 hadoop_security_authorization = config['configurations']['core-site']['hadoop.security.authorization']
 fs_default_name = config['configurations']['core-site']['fs.defaultFS']
 hadoop_security_auth_to_local = config['configurations']['core-site']['hadoop.security.auth_to_local']
-hadoop_rpc_protection = config['configurations']['ranger-hdfs-plugin-properties']['hadoop.rpc.protection']
-common_name_for_certificate = config['configurations']['ranger-hdfs-plugin-properties']['common.name.for.certificate']
-
-repo_config_username = config['configurations']['ranger-hdfs-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
 
 if security_enabled:
   sn_principal_name = default("/configurations/hdfs-site/dfs.secondary.namenode.kerberos.principal", "nn/_HOST@EXAMPLE.COM")
   sn_principal_name = sn_principal_name.replace('_HOST',hostname.lower())
 
-ranger_env = config['configurations']['ranger-env']
-ranger_plugin_properties = config['configurations']['ranger-hdfs-plugin-properties']
-policy_user = config['configurations']['ranger-hdfs-plugin-properties']['policy_user']
-
-#For curl command in ranger plugin to get db connector
+# for curl command in ranger plugin to get db connector
 jdk_location = config['hostLevelParams']['jdk_location']
 java_share_dir = '/usr/share/java'
 
 is_https_enabled = is_https_enabled_in_hdfs(config['configurations']['hdfs-site']['dfs.http.policy'],
                                             config['configurations']['hdfs-site']['dfs.https.enable'])
 
-if has_ranger_admin:
-  enable_ranger_hdfs = (config['configurations']['ranger-hdfs-plugin-properties']['ranger-hdfs-plugin-enabled'].lower() == 'yes')
+# ranger hdfs plugin section start
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+
+# xml_configurations_supported flag is now derived from a stack feature instead of ranger-env's xml_configurations_supported property
+xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
+
+# ambari-server hostname
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+# ranger hdfs plugin enabled property
+enable_ranger_hdfs = default("/configurations/ranger-hdfs-plugin-properties/ranger-hdfs-plugin-enabled", "No")
+enable_ranger_hdfs = enable_ranger_hdfs.lower() == 'yes'
+
+# get ranger hdfs properties if enable_ranger_hdfs is True
+if enable_ranger_hdfs:
+  # ranger policy url
+  policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+  if xml_configurations_supported:
+    policymgr_mgr_url = config['configurations']['ranger-hdfs-security']['ranger.plugin.hdfs.policy.rest.url']
+
+  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
+    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+
+  # ranger audit db user
+  xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+
+  # ranger hdfs service name
+  repo_name = str(config['clusterName']) + '_hadoop'
+  repo_name_value = config['configurations']['ranger-hdfs-security']['ranger.plugin.hdfs.service.name']
+  if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+    repo_name = repo_name_value
+
+  hadoop_rpc_protection = config['configurations']['ranger-hdfs-plugin-properties']['hadoop.rpc.protection']
+  common_name_for_certificate = config['configurations']['ranger-hdfs-plugin-properties']['common.name.for.certificate']
+  repo_config_username = config['configurations']['ranger-hdfs-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+
+  # ranger-env config
+  ranger_env = config['configurations']['ranger-env']
+
+  # create ranger-env config with external ranger credential properties
+  if not has_ranger_admin and enable_ranger_hdfs:
+    external_admin_username = default('/configurations/ranger-hdfs-plugin-properties/external_admin_username', 'admin')
+    external_admin_password = default('/configurations/ranger-hdfs-plugin-properties/external_admin_password', 'admin')
+    external_ranger_admin_username = default('/configurations/ranger-hdfs-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
+    external_ranger_admin_password = default('/configurations/ranger-hdfs-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
+    ranger_env = {}
+    ranger_env['admin_username'] = external_admin_username
+    ranger_env['admin_password'] = external_admin_password
+    ranger_env['ranger_admin_username'] = external_ranger_admin_username
+    ranger_env['ranger_admin_password'] = external_ranger_admin_password
+
+  ranger_plugin_properties = config['configurations']['ranger-hdfs-plugin-properties']
+  policy_user = config['configurations']['ranger-hdfs-plugin-properties']['policy_user']
+  repo_config_password = config['configurations']['ranger-hdfs-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
+
   xa_audit_db_password = ''
-  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
-    xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
-  repo_config_password = unicode(config['configurations']['ranger-hdfs-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'])
-  xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
+  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
+    xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
+
+  downloaded_custom_connector = None
   previous_jdbc_jar_name = None
+  driver_curl_source = None
+  driver_curl_target = None
+  previous_jdbc_jar = None
+
+  # to get db connector related properties
+  if has_ranger_admin and stack_supports_ranger_audit_db:
+    xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
+    jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
 
-  if stack_supports_ranger_audit_db:
-
-    if xa_audit_db_flavor == 'mysql':
-      jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
-      jdbc_driver = "com.mysql.jdbc.Driver"
-    elif xa_audit_db_flavor == 'oracle':
-      jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
-      colon_count = xa_db_host.count(':')
-      if colon_count == 2 or colon_count == 0:
-        audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
-      else:
-        audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
-      jdbc_driver = "oracle.jdbc.OracleDriver"
-    elif xa_audit_db_flavor == 'postgres':
-      jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
-      jdbc_driver = "org.postgresql.Driver"
-    elif xa_audit_db_flavor == 'mssql':
-      jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
-      jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
-    elif xa_audit_db_flavor == 'sqla':
-      jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
-      jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
-
-  downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  driver_curl_target = format("{hadoop_lib_home}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  previous_jdbc_jar = format("{hadoop_lib_home}/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-
-  sql_connector_jar = ''
+    downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_target = format("{hadoop_lib_home}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    previous_jdbc_jar = format("{hadoop_lib_home}/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    sql_connector_jar = ''
 
   hdfs_ranger_plugin_config = {
     'username': repo_config_username,
@@ -504,6 +509,7 @@ if has_ranger_admin:
     'repositoryType': 'hdfs',
     'assetType': '1'
   }
+
   if stack_supports_ranger_kerberos and security_enabled:
     hdfs_ranger_plugin_config['policy.download.auth.users'] = hdfs_user
     hdfs_ranger_plugin_config['tag.download.auth.users'] = hdfs_user
@@ -520,14 +526,16 @@ if has_ranger_admin:
     }
 
   xa_audit_db_is_enabled = False
-  ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
   if xml_configurations_supported and stack_supports_ranger_audit_db:
     xa_audit_db_is_enabled = config['configurations']['ranger-hdfs-audit']['xasecure.audit.destination.db']
-  xa_audit_hdfs_is_enabled = config['configurations']['ranger-hdfs-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
-  ssl_keystore_password = unicode(config['configurations']['ranger-hdfs-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
-  ssl_truststore_password = unicode(config['configurations']['ranger-hdfs-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
-  credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
 
-  #For SQLA explicitly disable audit to DB for Ranger
-  if xa_audit_db_flavor == 'sqla':
+  xa_audit_hdfs_is_enabled = config['configurations']['ranger-hdfs-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else False
+  ssl_keystore_password = config['configurations']['ranger-hdfs-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
+  ssl_truststore_password = config['configurations']['ranger-hdfs-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
+
+  # for SQLA explicitly disable audit to DB for Ranger
+  if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor.lower() == 'sqla':
     xa_audit_db_is_enabled = False
+
+# ranger hdfs plugin section end
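
The five per-flavor branches deleted above are what the new shared helper replaces: get_audit_configs(config) is imported from resource_management.libraries.functions.setup_ranger_plugin_xml, and its call sites fix the return shape to (jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver). A minimal standalone sketch, reconstructed purely from the deleted branches (the library implementation may differ in detail), amounts to:

def get_audit_configs_sketch(config):
    # Reconstruction of the deleted per-flavor branches; not the library
    # function itself. Returns the 4-tuple the call sites above unpack.
    props = config['configurations']['admin-properties']
    flavor = props['DB_FLAVOR'].lower()
    host = props['db_host']
    db = props.get('audit_db_name', 'ranger_audits')
    host_params = config.get('hostLevelParams', {})

    if flavor == 'oracle':
        # bare host (0 colons) or host:port:sid (2 colons) use the SID form;
        # host:port (1 colon) uses the '//' service-name form
        prefix = '@' if host.count(':') in (0, 2) else '@//'
        key, url, driver = 'oracle', 'jdbc:oracle:thin:%s%s' % (prefix, host), 'oracle.jdbc.OracleDriver'
    else:
        key, url, driver = {
            'mysql':    ('mysql',       'jdbc:mysql://%s/%s' % (host, db),                   'com.mysql.jdbc.Driver'),
            'postgres': ('postgres',    'jdbc:postgresql://%s/%s' % (host, db),              'org.postgresql.Driver'),
            'mssql':    ('mssql',       'jdbc:sqlserver://%s;databaseName=%s' % (host, db),  'com.microsoft.sqlserver.jdbc.SQLServerDriver'),
            'sqla':     ('sqlanywhere', 'jdbc:sqlanywhere:database=%s;host=%s' % (db, host), 'sap.jdbc4.sqlanywhere.IDriver'),
        }[flavor]

    jar = host_params.get('custom_%s_jdbc_name' % key)
    previous_jar = host_params.get('previous_custom_%s_jdbc_name' % key)
    return jar, previous_jar, url, driver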

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
index e3aff9d..47c6e35 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
@@ -29,8 +29,7 @@ from resource_management.libraries.functions.format import format
 def setup_ranger_hdfs(upgrade_type=None):
   import params
 
-  if params.has_ranger_admin:
-
+  if params.enable_ranger_hdfs:
 
     stack_version = None
 
@@ -93,29 +92,28 @@ def setup_ranger_hdfs(upgrade_type=None):
         target_file = source_file + ".bak"
         Execute(("mv", source_file, target_file), sudo=True, only_if=format("test -f {source_file}"))
   else:
-    Logger.info('Ranger admin not installed')
+    Logger.info('Ranger HDFS plugin is not enabled')
 
 def create_ranger_audit_hdfs_directories():
   import params
 
-  if params.has_ranger_admin:
-    if params.xml_configurations_supported and params.enable_ranger_hdfs and params.xa_audit_hdfs_is_enabled:
-      params.HdfsResource("/ranger/audit",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.hdfs_user,
-                         group=params.hdfs_user,
-                         mode=0755,
-                         recursive_chmod=True,
-      )
-      params.HdfsResource("/ranger/audit/hdfs",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.hdfs_user,
-                         group=params.hdfs_user,
-                         mode=0700,
-                         recursive_chmod=True,
-      )
-      params.HdfsResource(None, action="execute")
+  if params.enable_ranger_hdfs and params.xml_configurations_supported and params.xa_audit_hdfs_is_enabled:
+    params.HdfsResource("/ranger/audit",
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.hdfs_user,
+                       group=params.hdfs_user,
+                       mode=0755,
+                       recursive_chmod=True,
+    )
+    params.HdfsResource("/ranger/audit/hdfs",
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.hdfs_user,
+                       group=params.hdfs_user,
+                       mode=0700,
+                       recursive_chmod=True,
+    )
+    params.HdfsResource(None, action="execute")
   else:
-    Logger.info('Ranger admin not installed')
+    Logger.info('Ranger HDFS plugin is not enabled')

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index 62fdbfd..9185f78 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -43,6 +43,7 @@ from resource_management.libraries.functions.expect import expect
 from resource_management.libraries import functions
 from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
 from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
+from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs
 
 # Default log4j version; put config files under /etc/hive/conf
 log4j_version = '1'
@@ -641,84 +642,85 @@ if has_hive_interactive:
 hive_server2_zookeeper_namespace = config['configurations']['hive-site']['hive.server2.zookeeper.namespace']
 hive_zookeeper_quorum = config['configurations']['hive-site']['hive.zookeeper.quorum']
 
-# ranger host
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
-
-#ranger hive properties
-policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
-if 'admin-properties' in config['configurations'] and 'policymgr_external_url' in config['configurations']['admin-properties'] and policymgr_mgr_url.endswith('/'):
-  policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
-xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
-xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
-xa_db_host = config['configurations']['admin-properties']['db_host']
-repo_name = str(config['clusterName']) + '_hive'
-repo_name_value = config['configurations']['ranger-hive-security']['ranger.plugin.hive.service.name']
-if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
-  repo_name = repo_name_value
-
-jdbc_driver_class_name = config['configurations']['ranger-hive-plugin-properties']['jdbc.driverClassName']
-common_name_for_certificate = config['configurations']['ranger-hive-plugin-properties']['common.name.for.certificate']
-
-repo_config_username = config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
-
-ranger_env = config['configurations']['ranger-env']
-ranger_plugin_properties = config['configurations']['ranger-hive-plugin-properties']
-policy_user = config['configurations']['ranger-hive-plugin-properties']['policy_user']
+if security_enabled:
+  hive_principal = hive_server_principal.replace('_HOST',hostname.lower())
+  hive_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
 
 hive_cluster_token_zkstore = default("/configurations/hive-site/hive.cluster.delegation.token.store.zookeeper.znode", None)
 jaas_file = os.path.join(hive_config_dir, 'zkmigrator_jaas.conf')
 zkdtsm_pattern = '/zkdtsm_*'
 hive_zk_namespace = default("/configurations/hive-site/hive.zookeeper.namespace", None)
 
-if security_enabled:
-  hive_principal = hive_server_principal.replace('_HOST',hostname.lower())
-  hive_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
+# ranger hive plugin section start
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
 
-#For curl command in ranger plugin to get db connector
-if has_ranger_admin:
-  enable_ranger_hive = (config['configurations']['hive-env']['hive_security_authorization'].lower() == 'ranger')
-  repo_config_password = unicode(config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'])
-  xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
+# ranger hive plugin enabled property
+enable_ranger_hive = config['configurations']['hive-env']['hive_security_authorization'].lower() == 'ranger'
+
+# ranger support xml_configuration flag, instead of depending on ranger xml_configurations_supported/ranger-env, using stack feature
+xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
+
+# get ranger hive properties if enable_ranger_hive is True
+if enable_ranger_hive:
+  # get ranger policy url
+  policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+  if xml_configurations_supported:
+    policymgr_mgr_url = config['configurations']['ranger-hive-security']['ranger.plugin.hive.policy.rest.url']
+
+  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
+    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+
+  # ranger audit db user
+  xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+
+  # ranger hive service name
+  repo_name = str(config['clusterName']) + '_hive'
+  repo_name_value = config['configurations']['ranger-hive-security']['ranger.plugin.hive.service.name']
+  if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+    repo_name = repo_name_value
+
+  jdbc_driver_class_name = config['configurations']['ranger-hive-plugin-properties']['jdbc.driverClassName']
+  common_name_for_certificate = config['configurations']['ranger-hive-plugin-properties']['common.name.for.certificate']
+  repo_config_username = config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+
+  # ranger-env config
+  ranger_env = config['configurations']['ranger-env']
+
+  # create ranger-env config with external ranger credential properties
+  if not has_ranger_admin and enable_ranger_hive:
+    external_admin_username = default('/configurations/ranger-hive-plugin-properties/external_admin_username', 'admin')
+    external_admin_password = default('/configurations/ranger-hive-plugin-properties/external_admin_password', 'admin')
+    external_ranger_admin_username = default('/configurations/ranger-hive-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
+    external_ranger_admin_password = default('/configurations/ranger-hive-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
+    ranger_env = {}
+    ranger_env['admin_username'] = external_admin_username
+    ranger_env['admin_password'] = external_admin_password
+    ranger_env['ranger_admin_username'] = external_ranger_admin_username
+    ranger_env['ranger_admin_password'] = external_ranger_admin_password
+
+  ranger_plugin_properties = config['configurations']['ranger-hive-plugin-properties']
+  policy_user = config['configurations']['ranger-hive-plugin-properties']['policy_user']
+  repo_config_password = config['configurations']['ranger-hive-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
+
+  ranger_downloaded_custom_connector = None
   ranger_previous_jdbc_jar_name = None
+  ranger_driver_curl_source = None
+  ranger_driver_curl_target = None
+  ranger_previous_jdbc_jar = None
+
+  # to get db connector related properties
+  if has_ranger_admin and stack_supports_ranger_audit_db:
+    xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
+    ranger_jdbc_jar_name, ranger_previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
 
-  if stack_supports_ranger_audit_db:
-    if xa_audit_db_flavor and xa_audit_db_flavor == 'mysql':
-      ranger_jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
-      ranger_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
-      jdbc_driver = "com.mysql.jdbc.Driver"
-    elif xa_audit_db_flavor and xa_audit_db_flavor == 'oracle':
-      ranger_jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
-      ranger_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
-      colon_count = xa_db_host.count(':')
-      if colon_count == 2 or colon_count == 0:
-        audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
-      else:
-        audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
-      jdbc_driver = "oracle.jdbc.OracleDriver"
-    elif xa_audit_db_flavor and xa_audit_db_flavor == 'postgres':
-      ranger_jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
-      ranger_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
-      jdbc_driver = "org.postgresql.Driver"
-    elif xa_audit_db_flavor and xa_audit_db_flavor == 'mssql':
-      ranger_jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
-      ranger_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
-      jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
-    elif xa_audit_db_flavor and xa_audit_db_flavor == 'sqla':
-      ranger_jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
-      ranger_previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
-      jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
-
-  ranger_downloaded_custom_connector = format("{tmp_dir}/{ranger_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  ranger_driver_curl_source = format("{jdk_location}/{ranger_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  ranger_driver_curl_target = format("{hive_lib}/{ranger_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  ranger_previous_jdbc_jar = format("{hive_lib}/{ranger_previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  sql_connector_jar = ''
+    ranger_downloaded_custom_connector = format("{tmp_dir}/{ranger_jdbc_jar_name}")
+    ranger_driver_curl_source = format("{jdk_location}/{ranger_jdbc_jar_name}")
+    ranger_driver_curl_target = format("{hive_lib}/{ranger_jdbc_jar_name}")
+    ranger_previous_jdbc_jar = format("{hive_lib}/{ranger_previous_jdbc_jar_name}")
+    sql_connector_jar = ''
 
   ranger_hive_url = format("{hive_url}/default;principal={hive_principal}") if security_enabled else hive_url
   if stack_supports_ranger_hive_jdbc_url_change:
@@ -757,20 +759,21 @@ if has_ranger_admin:
       'type': 'hive'
     }
 
+  xa_audit_db_password = ''
+  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
+    xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
 
   xa_audit_db_is_enabled = False
-  xa_audit_db_password = ''
-  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
-    xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
-  ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
   if xml_configurations_supported and stack_supports_ranger_audit_db:
     xa_audit_db_is_enabled = config['configurations']['ranger-hive-audit']['xasecure.audit.destination.db']
-  xa_audit_hdfs_is_enabled = config['configurations']['ranger-hive-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
-  ssl_keystore_password = unicode(config['configurations']['ranger-hive-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
-  ssl_truststore_password = unicode(config['configurations']['ranger-hive-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
-  credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
 
-  #For SQLA explicitly disable audit to DB for Ranger
-  if xa_audit_db_flavor == 'sqla':
+  xa_audit_hdfs_is_enabled = config['configurations']['ranger-hive-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else False
+  ssl_keystore_password = config['configurations']['ranger-hive-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
+  ssl_truststore_password = config['configurations']['ranger-hive-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
+
+  # for SQLA explicitly disable audit to DB for Ranger
+  if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor.lower() == 'sqla':
     xa_audit_db_is_enabled = False
 
+# ranger hive plugin section end
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
index 81a4e3e..80bd7c8 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
@@ -22,7 +22,7 @@ from resource_management.core.logger import Logger
 def setup_ranger_hive(upgrade_type = None):
   import params
 
-  if params.has_ranger_admin:
+  if params.enable_ranger_hive:
 
     stack_version = None
 
@@ -34,7 +34,7 @@ def setup_ranger_hive(upgrade_type = None):
     else:
       Logger.info("Hive: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
 
-    if params.xml_configurations_supported and params.enable_ranger_hive and params.xa_audit_hdfs_is_enabled:
+    if params.xml_configurations_supported and params.xa_audit_hdfs_is_enabled:
       params.HdfsResource("/ranger/audit",
                          type="directory",
                          action="create_on_execute",
@@ -95,4 +95,4 @@ def setup_ranger_hive(upgrade_type = None):
                         ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
                         stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
   else:
-    Logger.info('Ranger admin not installed')
+    Logger.info('Ranger Hive plugin is not enabled')

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
index 82849c8..6c7ff69 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/params.py
@@ -34,6 +34,7 @@ from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs
 
 # server configurations
 config = Script.get_config()
@@ -166,41 +167,66 @@ else:
     kafka_jaas_principal = None
     kafka_keytab_path = None
 
-# ***********************  RANGER PLUGIN CHANGES ***********************
+# for curl command in ranger plugin to get db connector
+jdk_location = config['hostLevelParams']['jdk_location']
+
+# ranger kafka plugin section start
+
 # ranger host
-# **********************************************************************
 ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
 has_ranger_admin = not len(ranger_admin_hosts) == 0
-xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
+
+# xml_configurations_supported flag is now derived from a stack feature instead of ranger-env's xml_configurations_supported property
+xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
+
+# ambari-server hostname
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
 
 ranger_admin_log_dir = default("/configurations/ranger-env/ranger_admin_log_dir","/var/log/ranger/admin")
-is_supported_kafka_ranger = config['configurations']['kafka-env']['is_supported_kafka_ranger']
 
-#ranger kafka properties
-if has_ranger_admin and is_supported_kafka_ranger:
+# ranger kafka plugin enabled property
+enable_ranger_kafka = default("/configurations/ranger-kafka-plugin-properties/ranger-kafka-plugin-enabled", "No")
+enable_ranger_kafka = enable_ranger_kafka.lower() == 'yes'
 
-  enable_ranger_kafka = config['configurations']['ranger-kafka-plugin-properties']['ranger-kafka-plugin-enabled']
-  enable_ranger_kafka = not is_empty(enable_ranger_kafka) and enable_ranger_kafka.lower() == 'yes'
-  policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
-  if 'admin-properties' in config['configurations'] and 'policymgr_external_url' in config['configurations']['admin-properties'] and policymgr_mgr_url.endswith('/'):
+# is_supported_kafka_ranger flag is now derived from a stack feature instead of depending on kafka-env's is_supported_kafka_ranger property
+is_supported_kafka_ranger = check_stack_feature(StackFeature.KAFKA_RANGER_PLUGIN_SUPPORT, version_for_stack_feature_checks)
+
+# ranger kafka properties
+if enable_ranger_kafka and is_supported_kafka_ranger:
+  # get ranger policy url
+  policymgr_mgr_url = config['configurations']['ranger-kafka-security']['ranger.plugin.kafka.policy.rest.url']
+
+  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
     policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
-  xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
-  xa_audit_db_flavor = xa_audit_db_flavor.lower() if xa_audit_db_flavor else None
-  xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
+
+  # ranger audit db user
   xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+
   xa_audit_db_password = ''
-  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
-    xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
-  xa_db_host = config['configurations']['admin-properties']['db_host']
+  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
+    xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
+
+  # ranger kafka service/repository name
   repo_name = str(config['clusterName']) + '_kafka'
   repo_name_value = config['configurations']['ranger-kafka-security']['ranger.plugin.kafka.service.name']
   if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
     repo_name = repo_name_value
 
   ranger_env = config['configurations']['ranger-env']
-  ranger_plugin_properties = config['configurations']['ranger-kafka-plugin-properties']
 
+  # create ranger-env config with external ranger credential properties
+  if not has_ranger_admin and enable_ranger_kafka:
+    external_admin_username = default('/configurations/ranger-kafka-plugin-properties/external_admin_username', 'admin')
+    external_admin_password = default('/configurations/ranger-kafka-plugin-properties/external_admin_password', 'admin')
+    external_ranger_admin_username = default('/configurations/ranger-kafka-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
+    external_ranger_admin_password = default('/configurations/ranger-kafka-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
+    ranger_env = {}
+    ranger_env['admin_username'] = external_admin_username
+    ranger_env['admin_password'] = external_admin_password
+    ranger_env['ranger_admin_username'] = external_ranger_admin_username
+    ranger_env['ranger_admin_password'] = external_ranger_admin_password
+
+  ranger_plugin_properties = config['configurations']['ranger-kafka-plugin-properties']
   ranger_kafka_audit = config['configurations']['ranger-kafka-audit']
   ranger_kafka_audit_attrs = config['configuration_attributes']['ranger-kafka-audit']
   ranger_kafka_security = config['configurations']['ranger-kafka-security']
@@ -212,7 +238,7 @@ if has_ranger_admin and is_supported_kafka_ranger:
 
   ranger_plugin_config = {
     'username' : config['configurations']['ranger-kafka-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
-    'password' : unicode(config['configurations']['ranger-kafka-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']),
+    'password' : config['configurations']['ranger-kafka-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'],
     'zookeeper.connect' : config['configurations']['ranger-kafka-plugin-properties']['zookeeper.connect'],
     'commonNameForCertificate' : config['configurations']['ranger-kafka-plugin-properties']['common.name.for.certificate']
   }
@@ -232,64 +258,40 @@ if has_ranger_admin and is_supported_kafka_ranger:
     ranger_plugin_config['tag.download.auth.users'] = kafka_user
     ranger_plugin_config['ambari.service.check.user'] = policy_user
 
-  #For curl command in ranger plugin to get db connector
-  jdk_location = config['hostLevelParams']['jdk_location']
-  java_share_dir = '/usr/share/java'
+  downloaded_custom_connector = None
   previous_jdbc_jar_name = None
+  driver_curl_source = None
+  driver_curl_target = None
+  previous_jdbc_jar = None
 
-  if stack_supports_ranger_audit_db:
-    if xa_audit_db_flavor and xa_audit_db_flavor == 'mysql':
-      jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
-      jdbc_driver = "com.mysql.jdbc.Driver"
-    elif xa_audit_db_flavor and xa_audit_db_flavor == 'oracle':
-      jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
-      colon_count = xa_db_host.count(':')
-      if colon_count == 2 or colon_count == 0:
-        audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
-      else:
-        audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
-      jdbc_driver = "oracle.jdbc.OracleDriver"
-    elif xa_audit_db_flavor and xa_audit_db_flavor == 'postgres':
-      jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
-      jdbc_driver = "org.postgresql.Driver"
-    elif xa_audit_db_flavor and xa_audit_db_flavor == 'mssql':
-      jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
-      jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
-    elif xa_audit_db_flavor and xa_audit_db_flavor == 'sqla':
-      jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
-      jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
-
-  downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  driver_curl_target = format("{kafka_home}/libs/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  previous_jdbc_jar = format("{kafka_home}/libs/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+  if has_ranger_admin and stack_supports_ranger_audit_db:
+    xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
+    jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
+
+    downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_target = format("{kafka_home}/libs/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    previous_jdbc_jar = format("{kafka_home}/libs/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
 
   xa_audit_db_is_enabled = False
-  ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
   if xml_configurations_supported and stack_supports_ranger_audit_db:
     xa_audit_db_is_enabled = config['configurations']['ranger-kafka-audit']['xasecure.audit.destination.db']
+
   xa_audit_hdfs_is_enabled = default('/configurations/ranger-kafka-audit/xasecure.audit.destination.hdfs', False)
-  ssl_keystore_password = unicode(config['configurations']['ranger-kafka-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
-  ssl_truststore_password = unicode(config['configurations']['ranger-kafka-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
-  credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
+  ssl_keystore_password = config['configurations']['ranger-kafka-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
+  ssl_truststore_password = config['configurations']['ranger-kafka-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
 
   stack_version = get_stack_version('kafka-broker')
   setup_ranger_env_sh_source = format('{stack_root}/{stack_version}/ranger-kafka-plugin/install/conf.templates/enable/kafka-ranger-env.sh')
   setup_ranger_env_sh_target = format("{conf_dir}/kafka-ranger-env.sh")
 
-  #For SQLA explicitly disable audit to DB for Ranger
-  if xa_audit_db_flavor == 'sqla':
+  # for SQLA explicitly disable audit to DB for Ranger
+  if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor.lower() == 'sqla':
     xa_audit_db_is_enabled = False
 
+# ranger kafka plugin section end
+
 namenode_hosts = default("/clusterHostInfo/namenode_host", [])
 has_namenode = not len(namenode_hosts) == 0
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/setup_ranger_kafka.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/setup_ranger_kafka.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/setup_ranger_kafka.py
index 528dec2..e9719aa 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/setup_ranger_kafka.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1/package/scripts/setup_ranger_kafka.py
@@ -22,7 +22,7 @@ from resource_management.libraries.functions.setup_ranger_plugin_xml import setu
 def setup_ranger_kafka():
   import params
 
-  if params.has_ranger_admin:
+  if params.enable_ranger_kafka:
 
     from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
 
@@ -87,4 +87,4 @@ def setup_ranger_kafka():
     else:
       Logger.info("Stack does not support core-site.xml creation for Ranger plugin, skipping core-site.xml configurations")
   else:
-    Logger.info('Ranger admin not installed')
+    Logger.info('Ranger Kafka plugin is not enabled')


[44/50] [abbrv] ambari git commit: AMBARI-19584 : hive view 2.0 added REST endpoint to enable and fetch table and column statistics (nitirajrathore)

Posted by nc...@apache.org.
AMBARI-19584 : hive view 2.0 added REST endpoint to enable and fetch table and column statistics (nitirajrathore)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/189fae52
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/189fae52
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/189fae52

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 189fae52f11a975bb814884a844d55fedd23a51d
Parents: 1523386
Author: Nitiraj Rathore <ni...@gmail.com>
Authored: Wed Jan 18 13:19:07 2017 +0530
Committer: Nitiraj Rathore <ni...@gmail.com>
Committed: Wed Jan 18 13:23:31 2017 +0530

----------------------------------------------------------------------
 .../view/hive20/internal/dto/ColumnInfo.java    |   1 -
 .../view/hive20/internal/dto/ColumnStats.java   | 170 +++++++++++++
 .../view/hive20/internal/dto/TableMeta.java     |   9 +
 .../view/hive20/internal/dto/TableStats.java    |  88 +++++++
 .../internal/parsers/TableMetaParserImpl.java   |  41 ++-
 .../generators/AnalyzeTableQueryGenerator.java  |  40 +++
 .../FetchColumnStatsQueryGenerator.java         |  40 +++
 .../view/hive20/resources/browser/DDLProxy.java | 226 +++++++++++------
 .../hive20/resources/browser/DDLService.java    |  60 +++++
 .../view/hive20/resources/jobs/JobService.java  |  28 +--
 .../jobs/ResultsPaginationController.java       | 251 ++++++++++++++-----
 .../hive20/resources/jobs/viewJobs/JobImpl.java |   4 +
 .../rest/postman/hive20.postman_collection.json | 128 +++++++++-
 13 files changed, 920 insertions(+), 166 deletions(-)
----------------------------------------------------------------------
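
The two new query generators listed in the diffstat, AnalyzeTableQueryGenerator and FetchColumnStatsQueryGenerator, are not shown in this excerpt. As a hedged sketch, the standard HiveQL such generators would plausibly emit — inferred from the class names and from the ColumnStats header constants further down, so the view's actual generated text may differ — is:

def analyze_table_query(database, table, analyze_columns=False):
    # computing statistics is what "enables" them for later fetching
    query = 'ANALYZE TABLE `%s`.`%s` COMPUTE STATISTICS' % (database, table)
    if analyze_columns:
        query += ' FOR COLUMNS'
    return query

def fetch_column_stats_query(database, table, column):
    # for a single column, Hive prints the computed statistics as rows whose
    # headers match the ColumnStats constants ('# col_name', 'data_type',
    # 'min', 'max', 'num_nulls', 'distinct_count', 'avg_col_len', ...)
    return 'DESCRIBE FORMATTED `%s`.`%s` %s' % (database, table, column)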


http://git-wip-us.apache.org/repos/asf/ambari/blob/189fae52/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/ColumnInfo.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/ColumnInfo.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/ColumnInfo.java
index 2876348..44c82a0 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/ColumnInfo.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/ColumnInfo.java
@@ -25,7 +25,6 @@ import org.apache.commons.lang3.builder.EqualsBuilder;
  */
 public class ColumnInfo {
   private String name;
-  // TODO : to be broken into datatype + precision + scale for better comparison
   private String type;
   private Integer precision;
   private Integer scale;

http://git-wip-us.apache.org/repos/asf/ambari/blob/189fae52/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/ColumnStats.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/ColumnStats.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/ColumnStats.java
new file mode 100644
index 0000000..190ecd3
--- /dev/null
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/ColumnStats.java
@@ -0,0 +1,170 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.ambari.view.hive20.internal.dto;
+
+public class ColumnStats {
+  public static final String COLUMN_NAME = "# col_name";
+  public static final String DATA_TYPE = "data_type";
+  public static final String MIN = "min";
+  public static final String MAX = "max";
+  public static final String NUM_NULLS = "num_nulls";
+  public static final String DISTINCT_COUNT = "distinct_count";
+  public static final String AVG_COL_LEN = "avg_col_len";
+  public static final String MAX_COL_LEN = "max_col_len";
+  public static final String NUM_TRUES = "num_trues";
+  public static final String NUM_FALSES = "num_falses";
+  public static final String COMMENT = "comment";
+
+  private String databaseName;
+  private String tableName;
+  private String columnName;
+  private String dataType;
+  private String min;
+  private String max;
+  private String numNulls;
+  private String distinctCount;
+  private String avgColLen;
+  private String maxColLen;
+  private String numTrues;
+  private String numFalse;
+  private String comment;
+
+  public String getDatabaseName() {
+    return databaseName;
+  }
+
+  public void setDatabaseName(String databaseName) {
+    this.databaseName = databaseName;
+  }
+
+  public String getTableName() {
+    return tableName;
+  }
+
+  public void setTableName(String tableName) {
+    this.tableName = tableName;
+  }
+
+  public String getColumnName() {
+    return columnName;
+  }
+
+  public void setColumnName(String columnName) {
+    this.columnName = columnName;
+  }
+
+  public String getDataType() {
+    return dataType;
+  }
+
+  public void setDataType(String dataType) {
+    this.dataType = dataType;
+  }
+
+  public String getMin() {
+    return min;
+  }
+
+  public void setMin(String min) {
+    this.min = min;
+  }
+
+  public String getMax() {
+    return max;
+  }
+
+  public void setMax(String max) {
+    this.max = max;
+  }
+
+  public String getNumNulls() {
+    return numNulls;
+  }
+
+  public void setNumNulls(String numNulls) {
+    this.numNulls = numNulls;
+  }
+
+  public String getDistinctCount() {
+    return distinctCount;
+  }
+
+  public void setDistinctCount(String distinctCount) {
+    this.distinctCount = distinctCount;
+  }
+
+  public String getAvgColLen() {
+    return avgColLen;
+  }
+
+  public void setAvgColLen(String avgColLen) {
+    this.avgColLen = avgColLen;
+  }
+
+  public String getMaxColLen() {
+    return maxColLen;
+  }
+
+  public void setMaxColLen(String maxColLen) {
+    this.maxColLen = maxColLen;
+  }
+
+  public String getNumTrues() {
+    return numTrues;
+  }
+
+  public void setNumTrues(String numTrues) {
+    this.numTrues = numTrues;
+  }
+
+  public String getNumFalse() {
+    return numFalse;
+  }
+
+  public void setNumFalse(String numFalse) {
+    this.numFalse = numFalse;
+  }
+
+  public String getComment() {
+    return comment;
+  }
+
+  public void setComment(String comment) {
+    this.comment = comment;
+  }
+
+  @Override
+  public String toString() {
+    final StringBuilder sb = new StringBuilder("ColumnStats{");
+    sb.append("tableName='").append(tableName).append('\'');
+    sb.append(", columnName='").append(columnName).append('\'');
+    sb.append(", dataType='").append(dataType).append('\'');
+    sb.append(", min='").append(min).append('\'');
+    sb.append(", max='").append(max).append('\'');
+    sb.append(", numNulls='").append(numNulls).append('\'');
+    sb.append(", distinctCount='").append(distinctCount).append('\'');
+    sb.append(", avgColLen='").append(avgColLen).append('\'');
+    sb.append(", maxColLen='").append(maxColLen).append('\'');
+    sb.append(", numTrues='").append(numTrues).append('\'');
+    sb.append(", numFalse='").append(numFalse).append('\'');
+    sb.append(", comment='").append(comment).append('\'');
+    sb.append('}');
+    return sb.toString();
+  }
+}

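A minimal usage sketch (illustrative values only; the setters and toString()
are the ones defined above):

    ColumnStats stats = new ColumnStats();
    stats.setColumnName("i");        // from the "# col_name" cell
    stats.setDataType("int");        // from the "data_type" cell
    stats.setNumNulls("0");
    stats.setDistinctCount("42");
    System.out.println(stats);
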
http://git-wip-us.apache.org/repos/asf/ambari/blob/189fae52/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/TableMeta.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/TableMeta.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/TableMeta.java
index f47e76c..861d132 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/TableMeta.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/TableMeta.java
@@ -32,6 +32,7 @@ public class TableMeta implements Serializable{
   private String ddl;
   private PartitionInfo partitionInfo;
   private DetailedTableInfo detailedInfo;
+  private TableStats tableStats;
   private StorageInfo storageInfo;
   private ViewInfo viewInfo;
 
@@ -107,6 +108,14 @@ public class TableMeta implements Serializable{
     this.viewInfo = viewInfo;
   }
 
+  public TableStats getTableStats() {
+    return tableStats;
+  }
+
+  public void setTableStats(TableStats tableStats) {
+    this.tableStats = tableStats;
+  }
+
   @Override
   public String toString() {
     final StringBuilder sb = new StringBuilder("TableMeta{");

http://git-wip-us.apache.org/repos/asf/ambari/blob/189fae52/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/TableStats.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/TableStats.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/TableStats.java
new file mode 100644
index 0000000..b8b4f07
--- /dev/null
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/dto/TableStats.java
@@ -0,0 +1,88 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.ambari.view.hive20.internal.dto;
+
+/**
+ * This is returned as part of TableMeta when table info is requested.
+ * It holds the subset of DetailedTableInfo that contains statistics-related data.
+ */
+public class TableStats {
+  public static final String NUM_FILES = "numFiles";
+  public static final String COLUMN_STATS_ACCURATE = "COLUMN_STATS_ACCURATE";
+  public static final String RAW_DATA_SIZE = "rawDataSize";
+  public static final String TOTAL_SIZE = "totalSize";
+
+  private Boolean isTableStatsEnabled;
+  private Integer numFiles;
+  private String columnStatsAccurate;
+  private Integer rawDataSize;
+  private Integer totalSize;
+
+  public Boolean getTableStatsEnabled() {
+    return isTableStatsEnabled;
+  }
+
+  public void setTableStatsEnabled(Boolean tableStatsEnabled) {
+    isTableStatsEnabled = tableStatsEnabled;
+  }
+
+  public Integer getNumFiles() {
+    return numFiles;
+  }
+
+  public void setNumFiles(Integer numFiles) {
+    this.numFiles = numFiles;
+  }
+
+  public String getColumnStatsAccurate() {
+    return columnStatsAccurate;
+  }
+
+  public void setColumnStatsAccurate(String columnStatsAccurate) {
+    this.columnStatsAccurate = columnStatsAccurate;
+  }
+
+  public Integer getRawDataSize() {
+    return rawDataSize;
+  }
+
+  public void setRawDataSize(Integer rawDataSize) {
+    this.rawDataSize = rawDataSize;
+  }
+
+  public Integer getTotalSize() {
+    return totalSize;
+  }
+
+  public void setTotalSize(Integer totalSize) {
+    this.totalSize = totalSize;
+  }
+
+  @Override
+  public String toString() {
+    final StringBuilder sb = new StringBuilder("TableStats{");
+    sb.append("isStatsEnabled='").append(isTableStatsEnabled).append('\'');
+    sb.append(", numFiles='").append(numFiles).append('\'');
+    sb.append(", columnStatsAccurate='").append(columnStatsAccurate).append('\'');
+    sb.append(", rawDataSize='").append(rawDataSize).append('\'');
+    sb.append(", totalSize='").append(totalSize).append('\'');
+    sb.append('}');
+    return sb.toString();
+  }
+}

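A sketch of the table parameters these fields are parsed from (values are
placeholders; depending on the Hive version COLUMN_STATS_ACCURATE may be a
plain "true" or a JSON blob):

    Map<String, String> parameters = new HashMap<>();
    parameters.put(TableStats.NUM_FILES, "4");
    parameters.put(TableStats.RAW_DATA_SIZE, "812");
    parameters.put(TableStats.TOTAL_SIZE, "1024");
    parameters.put(TableStats.COLUMN_STATS_ACCURATE, "true");
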
http://git-wip-us.apache.org/repos/asf/ambari/blob/189fae52/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/parsers/TableMetaParserImpl.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/parsers/TableMetaParserImpl.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/parsers/TableMetaParserImpl.java
index 5cae34a..b0c9fe4 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/parsers/TableMetaParserImpl.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/parsers/TableMetaParserImpl.java
@@ -24,7 +24,9 @@ import org.apache.ambari.view.hive20.internal.dto.DetailedTableInfo;
 import org.apache.ambari.view.hive20.internal.dto.PartitionInfo;
 import org.apache.ambari.view.hive20.internal.dto.StorageInfo;
 import org.apache.ambari.view.hive20.internal.dto.TableMeta;
+import org.apache.ambari.view.hive20.internal.dto.TableStats;
 import org.apache.ambari.view.hive20.internal.dto.ViewInfo;
+import org.apache.parquet.Strings;
 
 import javax.inject.Inject;
 import java.util.List;
@@ -52,12 +54,11 @@ public class TableMetaParserImpl implements TableMetaParser<TableMeta> {
   @Inject
   private ViewInfoParser viewInfoParser;
 
-
-
   @Override
   public TableMeta parse(String database, String table, List<Row> createTableStatementRows, List<Row> describeFormattedRows) {
     String createTableStatement = createTableStatementParser.parse(createTableStatementRows);
     DetailedTableInfo tableInfo = detailedTableInfoParser.parse(describeFormattedRows);
+    TableStats tableStats = getTableStats(tableInfo);
     StorageInfo storageInfo = storageInfoParser.parse(describeFormattedRows);
     List<ColumnInfo> columns = columnInfoParser.parse(describeFormattedRows);
     PartitionInfo partitionInfo = partitionInfoParser.parse(describeFormattedRows);
@@ -74,6 +75,42 @@ public class TableMetaParserImpl implements TableMetaParser<TableMeta> {
     meta.setDetailedInfo(tableInfo);
     meta.setStorageInfo(storageInfo);
     meta.setViewInfo(viewInfo);
+    meta.setTableStats(tableStats);
     return meta;
   }
+
+  private TableStats getTableStats(DetailedTableInfo tableInfo) {
+    TableStats tableStats = new TableStats();
+    tableStats.setTableStatsEnabled(false);
+
+    String numFiles = tableInfo.getParameters().get(TableStats.NUM_FILES);
+    tableInfo.getParameters().remove(TableStats.NUM_FILES);
+
+    String columnStatsAccurate = tableInfo.getParameters().get(TableStats.COLUMN_STATS_ACCURATE);
+    tableInfo.getParameters().remove(TableStats.COLUMN_STATS_ACCURATE);
+
+    String rawDataSize = tableInfo.getParameters().get(TableStats.RAW_DATA_SIZE);
+    tableInfo.getParameters().remove(TableStats.RAW_DATA_SIZE);
+
+    String totalSize = tableInfo.getParameters().get(TableStats.TOTAL_SIZE);
+    tableInfo.getParameters().remove(TableStats.TOTAL_SIZE);
+
+    if(!Strings.isNullOrEmpty(numFiles) && !Strings.isNullOrEmpty(numFiles.trim())){
+      tableStats.setTableStatsEnabled(true);
+      tableStats.setNumFiles(Integer.valueOf(numFiles.trim()));
+    }
+
+    if(!Strings.isNullOrEmpty(rawDataSize) && !Strings.isNullOrEmpty(rawDataSize.trim())){
+      tableStats.setTableStatsEnabled(true);
+      tableStats.setRawDataSize(Integer.valueOf(rawDataSize.trim()));
+    }
+
+    if(!Strings.isNullOrEmpty(totalSize) && !Strings.isNullOrEmpty(totalSize.trim())){
+      tableStats.setTableStatsEnabled(true);
+      tableStats.setTotalSize(Integer.valueOf(totalSize.trim()));
+    }
+
+    tableStats.setColumnStatsAccurate(columnStatsAccurate);
+    return tableStats;
+  }
 }

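A sketch of the expected behavior of getTableStats, reusing the hypothetical
parameters from the TableStats example above:

    DetailedTableInfo info = detailedTableInfoParser.parse(describeFormattedRows);
    TableStats stats = getTableStats(info);
    // stats.getTableStatsEnabled() -> true, stats.getNumFiles() -> 4,
    // stats.getTotalSize() -> 1024; the four statistics keys have been
    // removed from info.getParameters() so the table details do not
    // report them twice.
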
http://git-wip-us.apache.org/repos/asf/ambari/blob/189fae52/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/query/generators/AnalyzeTableQueryGenerator.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/query/generators/AnalyzeTableQueryGenerator.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/query/generators/AnalyzeTableQueryGenerator.java
new file mode 100644
index 0000000..902d959
--- /dev/null
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/query/generators/AnalyzeTableQueryGenerator.java
@@ -0,0 +1,40 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.ambari.view.hive20.internal.query.generators;
+
+import com.google.common.base.Optional;
+import org.apache.ambari.view.hive20.exceptions.ServiceException;
+
+public class AnalyzeTableQueryGenerator implements QueryGenerator {
+  private final String databaseName;
+  private final String tableName;
+  private final Boolean shouldAnalyzeColumns;
+
+  public AnalyzeTableQueryGenerator(String databaseName, String tableName, Boolean shouldAnalyzeColumns) {
+    this.databaseName = databaseName;
+    this.tableName = tableName;
+    this.shouldAnalyzeColumns = shouldAnalyzeColumns;
+  }
+
+  @Override
+  public Optional<String> getQuery() throws ServiceException {
+    return Optional.of("ANALYZE TABLE " + "`" + databaseName + "." + tableName + "`" + " COMPUTE STATISTICS " +
+      (shouldAnalyzeColumns? " FOR COLUMNS ": "") + ";");
+  }
+}

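A usage sketch with placeholder names, showing the query the generator is
expected to emit:

    AnalyzeTableQueryGenerator generator =
        new AnalyzeTableQueryGenerator("default", "t1", true);
    String query = generator.getQuery().get();
    // -> "ANALYZE TABLE `default`.`t1` COMPUTE STATISTICS FOR COLUMNS;"
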
http://git-wip-us.apache.org/repos/asf/ambari/blob/189fae52/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/query/generators/FetchColumnStatsQueryGenerator.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/query/generators/FetchColumnStatsQueryGenerator.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/query/generators/FetchColumnStatsQueryGenerator.java
new file mode 100644
index 0000000..73b3698
--- /dev/null
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/internal/query/generators/FetchColumnStatsQueryGenerator.java
@@ -0,0 +1,40 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.ambari.view.hive20.internal.query.generators;
+
+import com.google.common.base.Optional;
+import org.apache.ambari.view.hive20.exceptions.ServiceException;
+
+public class FetchColumnStatsQueryGenerator implements QueryGenerator{
+  private final String databaseName;
+  private final String tableName;
+  private final String columnName;
+
+  public FetchColumnStatsQueryGenerator(String databaseName, String tableName, String columnName) {
+    this.databaseName = databaseName;
+    this.tableName = tableName;
+    this.columnName = columnName;
+  }
+
+  @Override
+  public Optional<String> getQuery() throws ServiceException {
+    return Optional.of("DESCRIBE FORMATTED " + "`" + this.databaseName + "." + this.tableName +  "." + this.columnName +
+      "`" );
+  }
+}

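A usage sketch with placeholder names:

    FetchColumnStatsQueryGenerator generator =
        new FetchColumnStatsQueryGenerator("default", "t1", "i");
    String query = generator.getQuery().get();
    // -> "DESCRIBE FORMATTED `default`.`t1`.`i`"
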
http://git-wip-us.apache.org/repos/asf/ambari/blob/189fae52/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/browser/DDLProxy.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/browser/DDLProxy.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/browser/DDLProxy.java
index 8d995dd..7210c75 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/browser/DDLProxy.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/browser/DDLProxy.java
@@ -32,8 +32,10 @@ import org.apache.ambari.view.hive20.actor.DatabaseManager;
 import org.apache.ambari.view.hive20.client.ConnectionConfig;
 import org.apache.ambari.view.hive20.client.DDLDelegator;
 import org.apache.ambari.view.hive20.client.DDLDelegatorImpl;
+import org.apache.ambari.view.hive20.client.HiveClientException;
 import org.apache.ambari.view.hive20.client.Row;
 import org.apache.ambari.view.hive20.exceptions.ServiceException;
+import org.apache.ambari.view.hive20.internal.dto.ColumnStats;
 import org.apache.ambari.view.hive20.internal.dto.DatabaseInfo;
 import org.apache.ambari.view.hive20.internal.dto.DatabaseResponse;
 import org.apache.ambari.view.hive20.internal.dto.TableInfo;
@@ -41,11 +43,14 @@ import org.apache.ambari.view.hive20.internal.dto.TableMeta;
 import org.apache.ambari.view.hive20.internal.dto.TableResponse;
 import org.apache.ambari.view.hive20.internal.parsers.TableMetaParserImpl;
 import org.apache.ambari.view.hive20.internal.query.generators.AlterTableQueryGenerator;
+import org.apache.ambari.view.hive20.internal.query.generators.AnalyzeTableQueryGenerator;
 import org.apache.ambari.view.hive20.internal.query.generators.CreateTableQueryGenerator;
 import org.apache.ambari.view.hive20.internal.query.generators.DeleteDatabaseQueryGenerator;
 import org.apache.ambari.view.hive20.internal.query.generators.DeleteTableQueryGenerator;
+import org.apache.ambari.view.hive20.internal.query.generators.FetchColumnStatsQueryGenerator;
 import org.apache.ambari.view.hive20.internal.query.generators.RenameTableQueryGenerator;
 import org.apache.ambari.view.hive20.resources.jobs.JobServiceInternal;
+import org.apache.ambari.view.hive20.resources.jobs.ResultsPaginationController;
 import org.apache.ambari.view.hive20.resources.jobs.viewJobs.Job;
 import org.apache.ambari.view.hive20.resources.jobs.viewJobs.JobController;
 import org.apache.ambari.view.hive20.resources.jobs.viewJobs.JobImpl;
@@ -116,6 +121,20 @@ public class DDLProxy {
     return transformToTableResponse(tableOptional.get(), databaseName);
   }
 
+  public Job getColumnStatsJob(final String databaseName, final String tableName, final String columnName,
+                         JobResourceManager resourceManager) throws ServiceException {
+    FetchColumnStatsQueryGenerator queryGenerator = new FetchColumnStatsQueryGenerator(databaseName, tableName,
+      columnName);
+    Optional<String> q = queryGenerator.getQuery();
+    String jobTitle = "Fetch column stats for " + databaseName + "." + tableName + "." + columnName;
+    if(q.isPresent()) {
+      String query = q.get();
+      return createJob(databaseName, query, jobTitle, resourceManager);
+    } else {
+      throw new ServiceException("Failed to generate job for " + jobTitle);
+    }
+  }
+
   public TableMeta getTableProperties(ViewContext context, ConnectionConfig connectionConfig, String databaseName, String tableName) {
     DDLDelegator delegator = new DDLDelegatorImpl(context, ConnectionSystem.getInstance().getActorSystem(), ConnectionSystem.getInstance().getOperationController(context));
     List<Row> createTableStatementRows = delegator.getTableCreateStatement(connectionConfig, databaseName, tableName);
@@ -222,40 +241,14 @@ public class DDLProxy {
 
   public Job createTable(String databaseName, TableMeta tableMeta, JobResourceManager resourceManager) throws ServiceException {
     String createTableQuery = this.generateCreateTableDDL(databaseName, tableMeta);
-    Map jobInfo = new HashMap<>();
-    jobInfo.put("title", "Create table " + tableMeta.getDatabase() + "." + tableMeta.getTable());
-    jobInfo.put("forcedContent", createTableQuery);
-    jobInfo.put("dataBase", databaseName);
-
-    try {
-      Job job = new JobImpl(jobInfo);
-      JobController createdJobController = new JobServiceInternal().createJob(job, resourceManager);
-      Job returnableJob = createdJobController.getJobPOJO();
-      LOG.info("returning job with id {} for create table {}", returnableJob.getId(), tableMeta.getTable());
-      return returnableJob;
-    } catch (Throwable e) {
-      LOG.error("Exception occurred while creating the table for create Query : {}", createTableQuery, e);
-      throw new ServiceException(e);
-    }
+    String jobTitle = "Create table " + tableMeta.getDatabase() + "." + tableMeta.getTable();
+    return createJob(databaseName, createTableQuery, jobTitle, resourceManager);
   }
 
   public Job deleteTable(String databaseName, String tableName, JobResourceManager resourceManager) throws ServiceException {
     String deleteTableQuery = generateDeleteTableDDL(databaseName, tableName);
-    Map jobInfo = new HashMap<>();
-    jobInfo.put("title", "Delete table " + databaseName + "." + tableName);
-    jobInfo.put("forcedContent", deleteTableQuery);
-    jobInfo.put("dataBase", databaseName);
-
-    try {
-      Job job = new JobImpl(jobInfo);
-      JobController createdJobController = new JobServiceInternal().createJob(job, resourceManager);
-      Job returnableJob = createdJobController.getJobPOJO();
-      LOG.info("returning job with id {} for the deletion of table : {}", returnableJob.getId(), tableName);
-      return returnableJob;
-    } catch (Throwable e) {
-      LOG.error("Exception occurred while deleting the table for delete Query : {}", deleteTableQuery, e);
-      throw new ServiceException(e);
-    }
+    String jobTitle = "Delete table " + databaseName + "." + tableName;
+    return createJob(databaseName, deleteTableQuery, jobTitle, resourceManager);
   }
 
   public String generateDeleteTableDDL(String databaseName, String tableName) throws ServiceException {
@@ -270,21 +263,8 @@ public class DDLProxy {
 
   public Job alterTable(ViewContext context, ConnectionConfig hiveConnectionConfig, String databaseName, String oldTableName, TableMeta newTableMeta, JobResourceManager resourceManager) throws ServiceException {
     String alterQuery = generateAlterTableQuery(context, hiveConnectionConfig, databaseName, oldTableName, newTableMeta);
-    Map jobInfo = new HashMap<>();
-    jobInfo.put("title", "Alter table " + databaseName + "." + oldTableName);
-    jobInfo.put("forcedContent", alterQuery);
-    jobInfo.put("dataBase", databaseName);
-
-    try {
-      Job job = new JobImpl(jobInfo);
-      JobController createdJobController = new JobServiceInternal().createJob(job, resourceManager);
-      Job returnableJob = createdJobController.getJobPOJO();
-      LOG.info("returning job with id {} for alter table {}", returnableJob.getId(), oldTableName);
-      return returnableJob;
-    } catch (Throwable e) {
-      LOG.error("Exception occurred while creating the table for create Query : {}", alterQuery, e);
-      throw new ServiceException(e);
-    }
+    String jobTitle = "Alter table " + databaseName + "." + oldTableName;
+    return createJob(databaseName, alterQuery, jobTitle, resourceManager);
   }
 
   public String generateAlterTableQuery(ViewContext context, ConnectionConfig hiveConnectionConfig, String databaseName, String oldTableName, TableMeta newTableMeta) throws ServiceException {
@@ -310,22 +290,9 @@ public class DDLProxy {
     Optional<String> renameTable = queryGenerator.getQuery();
     if(renameTable.isPresent()) {
       String renameQuery = renameTable.get();
-      LOG.info("Creating job for : {}", renameQuery);
-      Map jobInfo = new HashMap<>();
-      jobInfo.put("title", "Rename table " + oldDatabaseName + "." + oldTableName + " to " + newDatabaseName + "." + newTableName);
-      jobInfo.put("forcedContent", renameQuery);
-      jobInfo.put("dataBase", oldDatabaseName);
-
-      try {
-        Job job = new JobImpl(jobInfo);
-        JobController createdJobController = new JobServiceInternal().createJob(job, resourceManager);
-        Job returnableJob = createdJobController.getJobPOJO();
-        LOG.info("returning job with id {} for rename table {}", returnableJob.getId(), oldTableName);
-        return returnableJob;
-      } catch (Throwable e) {
-        LOG.error("Exception occurred while renaming the table for rename Query : {}", renameQuery, e);
-        throw new ServiceException(e);
-      }
+      String jobTitle = "Rename table " + oldDatabaseName + "." + oldTableName + " to " + newDatabaseName + "." +
+        newTableName;
+      return createJob(oldDatabaseName, renameQuery, jobTitle, resourceManager);
     }else{
       throw new ServiceException("Failed to generate rename table query for table " + oldDatabaseName + "." +
         oldTableName);
@@ -337,24 +304,129 @@ public class DDLProxy {
     Optional<String> deleteDatabase = queryGenerator.getQuery();
     if(deleteDatabase.isPresent()) {
       String deleteQuery = deleteDatabase.get();
-      LOG.info("Creating job for : {}", deleteQuery );
-      Map jobInfo = new HashMap<>();
-      jobInfo.put("title", "Delete database " + databaseName);
-      jobInfo.put("forcedContent", deleteQuery);
-      jobInfo.put("dataBase", databaseName);
-
-      try {
-        Job job = new JobImpl(jobInfo);
-        JobController createdJobController = new JobServiceInternal().createJob(job, resourceManager);
-        Job returnableJob = createdJobController.getJobPOJO();
-        LOG.info("returning job with id {} for deleting database {}", returnableJob.getId(), databaseName);
-        return returnableJob;
-      } catch (Throwable e) {
-        LOG.error("Exception occurred while renaming the table for rename Query : {}", deleteQuery, e);
-        throw new ServiceException(e);
-      }
+      return createJob(databaseName, deleteQuery, "Delete database " + databaseName , resourceManager);
     }else{
       throw new ServiceException("Failed to generate delete database query for database " + databaseName);
     }
   }
+
+  public Job createJob(String databaseName, String query, String jobTitle, JobResourceManager resourceManager)
+    throws ServiceException {
+    LOG.info("Creating job for : {}", query);
+    Map jobInfo = new HashMap<>();
+    jobInfo.put("title", jobTitle);
+    jobInfo.put("forcedContent", query);
+    jobInfo.put("dataBase", databaseName);
+    jobInfo.put("referrer", JobImpl.REFERRER.INTERNAL.name());
+
+    try {
+      Job job = new JobImpl(jobInfo);
+      JobController createdJobController = new JobServiceInternal().createJob(job, resourceManager);
+      Job returnableJob = createdJobController.getJobPOJO();
+      LOG.info("returning job with id {} for {}", returnableJob.getId(), jobTitle);
+      return returnableJob;
+    } catch (Throwable e) {
+      LOG.error("Exception occurred while {} : {}", jobTitle, deleteQuery, e);
+      throw new ServiceException(e);
+    }
+  }
+
+  public Job analyzeTable(String databaseName, String tableName, Boolean shouldAnalyzeColumns, JobResourceManager resourceManager) throws ServiceException {
+    AnalyzeTableQueryGenerator queryGenerator = new AnalyzeTableQueryGenerator(databaseName, tableName, shouldAnalyzeColumns);
+    Optional<String> analyzeTable = queryGenerator.getQuery();
+    String jobTitle = "Analyze table " + databaseName + "." + tableName;
+    if(analyzeTable.isPresent()) {
+      String query = analyzeTable.get();
+      return createJob(databaseName, query, jobTitle, resourceManager);
+    } else {
+      throw new ServiceException("Failed to generate job for " + jobTitle);
+    }
+  }
+
+  public ColumnStats fetchColumnStats(String columnName, String jobId, ViewContext context) throws ServiceException {
+    try {
+      ResultsPaginationController.ResultsResponse results = ResultsPaginationController.getResult(jobId, null, null, null, null, context);
+      if(results.getHasResults()){
+       List<String[]> rows = results.getRows();
+       Map<Integer, String> headerMap = new HashMap<>();
+       boolean header = true;
+        for(String[] row : rows){
+          if(header){
+            for(int i = 0 ; i < row.length; i++){
+              if(!Strings.isNullOrEmpty(row[i])){
+                headerMap.put(i, row[i].trim());
+              }
+            }
+            header = false;
+          }
+          else if(row.length > 0 ){
+            if(columnName.equals(row[0])){ // the first column of the row contains column name
+              return createColumnStats(row, headerMap);
+            }
+          }
+        }
+      }else{
+        throw new ServiceException("Cannot find any result for this jobId: " + jobId);
+      }
+    } catch (HiveClientException e) {
+      LOG.error("Exception occurred while fetching results for column statistics with jobId: {}", jobId, e);
+      throw new ServiceException(e);
+    }
+
+    LOG.error("Column stats not found in the fetched results.");
+    throw new ServiceException("Could not find the column stats in the result.");
+  }
+
+  /**
+   * Order of values in the row array:
+   *   [# col_name, data_type, min, max, num_nulls, distinct_count,
+   *    avg_col_len, max_col_len, num_trues, num_falses, comment]
+   *   (indexes 0 through 10)
+   * @param row a data row of the DESCRIBE FORMATTED output
+   * @param headerMap index-to-header-name mapping built from the header row
+   * @return the populated ColumnStats
+   */
+  private ColumnStats createColumnStats(String[] row, Map<Integer, String> headerMap) throws ServiceException {
+    if(null == row){
+      throw new ServiceException("row cannot be null.");
+    }
+    ColumnStats columnStats = new ColumnStats();
+    for(int i = 0 ; i < row.length; i++){
+      switch(headerMap.get(i)){
+        case ColumnStats.COLUMN_NAME:
+          columnStats.setColumnName(row[i]);
+          break;
+        case ColumnStats.DATA_TYPE:
+          columnStats.setDataType(row[i]);
+          break;
+        case ColumnStats.MIN:
+          columnStats.setMin(row[i]);
+          break;
+        case ColumnStats.MAX:
+          columnStats.setMax(row[i]);
+          break;
+        case ColumnStats.NUM_NULLS:
+          columnStats.setNumNulls(row[i]);
+          break;
+        case ColumnStats.DISTINCT_COUNT:
+          columnStats.setDistinctCount(row[i]);
+          break;
+        case ColumnStats.AVG_COL_LEN:
+          columnStats.setAvgColLen(row[i]);
+          break;
+        case ColumnStats.MAX_COL_LEN:
+          columnStats.setMaxColLen(row[i]);
+          break;
+        case ColumnStats.NUM_TRUES:
+          columnStats.setNumTrues(row[i]);
+          break;
+        case ColumnStats.NUM_FALSES:
+          columnStats.setNumFalse(row[i]);
+          break;
+        case ColumnStats.COMMENT:
+          columnStats.setComment(row[i]);
+      }
+    }
+
+    return columnStats;
+  }
 }
\ No newline at end of file

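A sketch of the intended call sequence through DDLProxy (job polling elided;
proxy, resourceManager and context stand for the instances used elsewhere in
this class):

    Job analyze = proxy.analyzeTable("default", "t1", true, resourceManager);
    // ... poll the analyze job until it finishes, then:
    Job describe = proxy.getColumnStatsJob("default", "t1", "i", resourceManager);
    // ... poll again, then parse the finished job's result rows:
    ColumnStats stats = proxy.fetchColumnStats("i", describe.getId(), context);
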
http://git-wip-us.apache.org/repos/asf/ambari/blob/189fae52/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/browser/DDLService.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/browser/DDLService.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/browser/DDLService.java
index e142baf..5c955a2 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/browser/DDLService.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/browser/DDLService.java
@@ -21,6 +21,7 @@ package org.apache.ambari.view.hive20.resources.browser;
 import org.apache.ambari.view.hive20.BaseService;
 import org.apache.ambari.view.hive20.client.ConnectionConfig;
 import org.apache.ambari.view.hive20.exceptions.ServiceException;
+import org.apache.ambari.view.hive20.internal.dto.ColumnStats;
 import org.apache.ambari.view.hive20.internal.dto.DatabaseResponse;
 import org.apache.ambari.view.hive20.internal.dto.TableMeta;
 import org.apache.ambari.view.hive20.internal.dto.TableResponse;
@@ -28,6 +29,7 @@ import org.apache.ambari.view.hive20.resources.jobs.viewJobs.Job;
 import org.apache.ambari.view.hive20.resources.jobs.viewJobs.JobResourceManager;
 import org.apache.ambari.view.hive20.utils.ServiceFormattedException;
 import org.apache.ambari.view.hive20.utils.SharedObjectsFactory;
+import org.apache.parquet.Strings;
 import org.json.simple.JSONObject;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -155,6 +157,28 @@ public class DDLService extends BaseService {
     }
   }
 
+  @PUT
+  @Path("databases/{database_id}/tables/{table_id}/analyze")
+  @Produces(MediaType.APPLICATION_JSON)
+  @Consumes(MediaType.APPLICATION_JSON)
+  public Response analyzeTable(@PathParam("database_id") String databaseName, @PathParam("table_id") String tableName,
+                              @QueryParam("analyze_columns") String analyzeColumns) {
+    Boolean shouldAnalyzeColumns = Boolean.FALSE;
+    if(!Strings.isNullOrEmpty(analyzeColumns)){
+      shouldAnalyzeColumns = Boolean.valueOf(analyzeColumns.trim());
+    }
+    try {
+      Job job = proxy.analyzeTable(databaseName, tableName, shouldAnalyzeColumns, getResourceManager());
+      JSONObject response = new JSONObject();
+      response.put("job", job);
+      return Response.status(Response.Status.ACCEPTED).entity(response).build();
+    } catch (ServiceException e) {
+      LOG.error("Exception occurred while analyzing table for database {}, table: {}, analyzeColumns: {}" ,
+        databaseName, tableName, analyzeColumns, e);
+      throw new ServiceFormattedException(e);
+    }
+  }
+
   @POST
   @Path("databases/{database_id}/tables/ddl")
   @Produces(MediaType.APPLICATION_JSON)
@@ -241,6 +265,42 @@ public class DDLService extends BaseService {
     return Response.ok(response).build();
   }
 
+  @GET
+  @Path("databases/{database_id}/tables/{table_id}/column/{column_id}/stats")
+  @Produces(MediaType.APPLICATION_JSON)
+  @Consumes(MediaType.APPLICATION_JSON)
+  public Response getColumnStats(@PathParam("database_id") String databaseName, @PathParam("table_id") String tableName,
+                            @PathParam("column_id") String columnName) {
+    try {
+      Job job = proxy.getColumnStatsJob(databaseName, tableName, columnName, getResourceManager());
+      JSONObject response = new JSONObject();
+      response.put("job", job);
+      return Response.status(Response.Status.ACCEPTED).entity(response).build();
+    } catch (ServiceException e) {
+      LOG.error("Exception occurred while fetching column stats", databaseName, tableName, e);
+      throw new ServiceFormattedException(e);
+    }
+  }
+
+  @GET
+  @Path("databases/{database_id}/tables/{table_id}/column/{column_id}/fetch_stats")
+  @Produces(MediaType.APPLICATION_JSON)
+  @Consumes(MediaType.APPLICATION_JSON)
+  public Response fetchColumnStats(@PathParam("database_id") String databaseName, @PathParam("table_id") String
+    tablename, @PathParam("column_id") String columnName, @QueryParam("job_id") String jobId) {
+    try {
+      ColumnStats columnStats = proxy.fetchColumnStats(columnName, jobId, context);
+      columnStats.setTableName(tablename);
+      columnStats.setDatabaseName(databaseName);
+      JSONObject response = new JSONObject();
+      response.put("columnStats", columnStats);
+      return Response.status(Response.Status.ACCEPTED).entity(response).build();
+    } catch (ServiceException e) {
+      LOG.error("Exception occurred while fetching column stats for column: {} and jobId: {}", columnName, jobId,  e);
+      throw new ServiceFormattedException(e);
+    }
+  }
+
   public static class DDL {
     String query;
 

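Taken together, the endpoints are meant to be driven as a three-step flow, as
the postman collection below also exercises (URLs abbreviated):

    PUT  .../resources/ddl/databases/default/tables/t1/analyze?analyze_columns=true
    GET  .../resources/ddl/databases/default/tables/t1/column/i/stats
    GET  .../resources/jobs/<job_id>        (poll until the submitted job finishes)
    GET  .../resources/ddl/databases/default/tables/t1/column/i/fetch_stats?job_id=<job_id>
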
http://git-wip-us.apache.org/repos/asf/ambari/blob/189fae52/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/jobs/JobService.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/jobs/JobService.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/jobs/JobService.java
index 675ea37..71cedd1 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/jobs/JobService.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/jobs/JobService.java
@@ -30,11 +30,8 @@ import org.apache.ambari.view.hive20.backgroundjobs.BackgroundJobController;
 import org.apache.ambari.view.hive20.client.AsyncJobRunner;
 import org.apache.ambari.view.hive20.client.AsyncJobRunnerImpl;
 import org.apache.ambari.view.hive20.client.ColumnDescription;
-import org.apache.ambari.view.hive20.client.Cursor;
-import org.apache.ambari.view.hive20.client.EmptyCursor;
 import org.apache.ambari.view.hive20.client.HiveClientException;
 import org.apache.ambari.view.hive20.client.NonPersistentCursor;
-import org.apache.ambari.view.hive20.client.Row;
 import org.apache.ambari.view.hive20.persistence.utils.ItemNotFound;
 import org.apache.ambari.view.hive20.resources.jobs.atsJobs.IATSParser;
 import org.apache.ambari.view.hive20.resources.jobs.viewJobs.Job;
@@ -79,7 +76,6 @@ import java.lang.reflect.InvocationTargetException;
 import java.sql.SQLException;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.Callable;
 
 /**
  * Servlet for queries
@@ -362,29 +358,7 @@ public class JobService extends BaseService {
                              @QueryParam("columns") final String requestedColumns) {
     try {
 
-      final String username = context.getUsername();
-
-      ConnectionSystem system = ConnectionSystem.getInstance();
-      final AsyncJobRunner asyncJobRunner = new AsyncJobRunnerImpl(context, system.getOperationController(context), system.getActorSystem());
-
-      return ResultsPaginationController.getInstance(context)
-              .request(jobId, searchId, true, fromBeginning, count, format,requestedColumns,
-                      new Callable<Cursor< Row, ColumnDescription >>() {
-                        @Override
-                        public Cursor call() throws Exception {
-                          Optional<NonPersistentCursor> cursor;
-                          if(fromBeginning != null && fromBeginning.equals("true")){
-                            cursor = asyncJobRunner.resetAndGetCursor(jobId, username);
-                          }
-                          else {
-                            cursor = asyncJobRunner.getCursor(jobId, username);
-                          }
-                          if(cursor.isPresent())
-                          return cursor.get();
-                          else
-                            return new EmptyCursor();
-                        }
-                      }).build();
+      return ResultsPaginationController.getResultAsResponse(jobId, fromBeginning, count, searchId, format, requestedColumns, context);
 
     } catch (WebApplicationException ex) {
       throw ex;

http://git-wip-us.apache.org/repos/asf/ambari/blob/189fae52/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/jobs/ResultsPaginationController.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/jobs/ResultsPaginationController.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/jobs/ResultsPaginationController.java
index 6efa2a9..e9b6d81 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/jobs/ResultsPaginationController.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/jobs/ResultsPaginationController.java
@@ -20,20 +20,26 @@ package org.apache.ambari.view.hive20.resources.jobs;
 
 
 import com.google.common.base.Function;
+import com.google.common.base.Optional;
+import com.google.common.base.Strings;
 import com.google.common.collect.FluentIterable;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 import org.apache.ambari.view.ViewContext;
+import org.apache.ambari.view.hive20.ConnectionSystem;
+import org.apache.ambari.view.hive20.client.AsyncJobRunner;
+import org.apache.ambari.view.hive20.client.AsyncJobRunnerImpl;
 import org.apache.ambari.view.hive20.client.ColumnDescription;
 import org.apache.ambari.view.hive20.client.Cursor;
+import org.apache.ambari.view.hive20.client.EmptyCursor;
 import org.apache.ambari.view.hive20.client.HiveClientException;
+import org.apache.ambari.view.hive20.client.NonPersistentCursor;
 import org.apache.ambari.view.hive20.client.Row;
 import org.apache.ambari.view.hive20.utils.BadRequestFormattedException;
 import org.apache.ambari.view.hive20.utils.ResultFetchFormattedException;
 import org.apache.ambari.view.hive20.utils.ResultNotReadyFormattedException;
 import org.apache.ambari.view.hive20.utils.ServiceFormattedException;
 import org.apache.commons.collections4.map.PassiveExpiringMap;
-import org.apache.hadoop.hbase.util.Strings;
 
 import javax.ws.rs.core.Response;
 import java.util.ArrayList;
@@ -65,6 +71,49 @@ public class ResultsPaginationController {
   private static final int DEFAULT_FETCH_COUNT = 50;
   private Map<String, Cursor<Row, ColumnDescription>> resultsCache;
 
+  public static Response getResultAsResponse(final String jobId, final String fromBeginning, Integer count, String searchId, String format, String requestedColumns, ViewContext context) throws HiveClientException {
+    final String username = context.getUsername();
+
+    ConnectionSystem system = ConnectionSystem.getInstance();
+    final AsyncJobRunner asyncJobRunner = new AsyncJobRunnerImpl(context, system.getOperationController(context), system.getActorSystem());
+
+    return getInstance(context)
+            .request(jobId, searchId, true, fromBeginning, count, format,requestedColumns,
+              createCallableMakeResultSets(jobId, fromBeginning, username, asyncJobRunner)).build();
+  }
+
+  public static ResultsResponse getResult(final String jobId, final String fromBeginning, Integer count, String
+    searchId, String requestedColumns, ViewContext context) throws HiveClientException {
+    final String username = context.getUsername();
+
+    ConnectionSystem system = ConnectionSystem.getInstance();
+    final AsyncJobRunner asyncJobRunner = new AsyncJobRunnerImpl(context, system.getOperationController(context), system.getActorSystem());
+
+    return getInstance(context)
+            .fetchResult(jobId, searchId, true, fromBeginning, count, requestedColumns,
+              createCallableMakeResultSets(jobId, fromBeginning, username, asyncJobRunner));
+  }
+
+  private static Callable<Cursor<Row, ColumnDescription>> createCallableMakeResultSets(final String jobId, final String
+    fromBeginning, final String username, final AsyncJobRunner asyncJobRunner) {
+    return new Callable<Cursor< Row, ColumnDescription >>() {
+      @Override
+      public Cursor call() throws Exception {
+        Optional<NonPersistentCursor> cursor;
+        if(fromBeginning != null && fromBeginning.equals("true")){
+          cursor = asyncJobRunner.resetAndGetCursor(jobId, username);
+        }
+        else {
+          cursor = asyncJobRunner.getCursor(jobId, username);
+        }
+        if (cursor.isPresent()) {
+          return cursor.get();
+        } else {
+          return new EmptyCursor();
+        }
+      }
+    };
+  }
+
   public static class CustomTimeToLiveExpirationPolicy extends PassiveExpiringMap.ConstantTimeToLiveExpirationPolicy<String, Cursor<Row, ColumnDescription>> {
     public CustomTimeToLiveExpirationPolicy(long timeToLiveMillis) {
       super(timeToLiveMillis);
@@ -125,72 +174,85 @@ public class ResultsPaginationController {
     return getResultsCache().get(key);
   }
 
-  public Response.ResponseBuilder request(String key, String searchId, boolean canExpire, String fromBeginning, Integer count, String format, String requestedColumns, Callable<Cursor<Row, ColumnDescription>> makeResultsSet) throws HiveClientException {
-    if (searchId == null)
-      searchId = DEFAULT_SEARCH_ID;
-    key = key + "?" + searchId;
-    if (!canExpire)
-      key = "$" + key;
-    if (fromBeginning != null && fromBeginning.equals("true") && getResultsCache().containsKey(key)) {
-
-      getResultsCache().remove(key);
-    }
-
-    Cursor<Row, ColumnDescription> resultSet = getResultsSet(key, makeResultsSet);
-
-    if (count == null)
-      count = DEFAULT_FETCH_COUNT;
-
-    List<ColumnDescription> allschema = resultSet.getDescriptions();
-    List<Row> allRowEntries = FluentIterable.from(resultSet)
-      .limit(count).toList();
+  /**
+   * Returns the results in the standard format.
+   * @param key cache key for the result set (usually the job id)
+   * @param searchId sub-key separating concurrent readers of the same results
+   * @param canExpire whether the cached cursor is allowed to expire
+   * @param fromBeginning "true" to reset the cursor and read from the first row
+   * @param count number of rows to fetch
+   * @param requestedColumns comma-separated column indexes to project; empty for all
+   * @param makeResultsSet factory invoked when no cursor is cached yet
+   * @return the fetched rows together with schema and paging metadata
+   * @throws HiveClientException
+   */
+  public ResultsResponse fetchResult(String key, String searchId, boolean canExpire, String fromBeginning, Integer
+    count, String requestedColumns, Callable<Cursor<Row, ColumnDescription>> makeResultsSet) throws HiveClientException {
 
-    List<ColumnDescription> schema = allschema;
+    ResultProcessor resultProcessor = new ResultProcessor(key, searchId, canExpire, fromBeginning, count, requestedColumns, makeResultsSet).invoke();
+    List<Object[]> rows = resultProcessor.getRows();
+    List<ColumnDescription> schema = resultProcessor.getSchema();
+    Cursor<Row, ColumnDescription> resultSet = resultProcessor.getResultSet();
 
-    final Set<Integer> selectedColumns = getRequestedColumns(requestedColumns);
-    if (!selectedColumns.isEmpty()) {
-      schema = filter(allschema, selectedColumns);
-    }
+    int read = rows.size();
+    return getResultsResponse(rows, schema, resultSet, read);
+  }
 
-    List<Object[]> rows = FluentIterable.from(allRowEntries)
-      .transform(new Function<Row, Object[]>() {
-        @Override
-        public Object[] apply(Row input) {
-          if(!selectedColumns.isEmpty()) {
-            return filter(Lists.newArrayList(input.getRow()), selectedColumns).toArray();
-          } else {
-            return input.getRow();
-          }
-        }
-      }).toList();
+  /**
+   * Returns the results in either D3 format or standard format, wrapped in a ResponseBuilder.
+   * @param key cache key for the result set (usually the job id)
+   * @param searchId sub-key separating concurrent readers of the same results
+   * @param canExpire whether the cached cursor is allowed to expire
+   * @param fromBeginning "true" to reset the cursor and read from the first row
+   * @param count number of rows to fetch
+   * @param format 'd3' or empty
+   * @param requestedColumns comma-separated column indexes to project; empty for all
+   * @param makeResultsSet factory invoked when no cursor is cached yet
+   * @return the response builder carrying the formatted results
+   * @throws HiveClientException
+   */
+  public Response.ResponseBuilder request(String key, String searchId, boolean canExpire, String fromBeginning, Integer count, String format, String requestedColumns, Callable<Cursor<Row, ColumnDescription>> makeResultsSet) throws HiveClientException {
+    ResultProcessor resultProcessor = new ResultProcessor(key, searchId, canExpire, fromBeginning, count, requestedColumns, makeResultsSet).invoke();
+    List<Object[]> rows = resultProcessor.getRows();
+    List<ColumnDescription> schema = resultProcessor.getSchema();
+    Cursor<Row, ColumnDescription> resultSet = resultProcessor.getResultSet();
 
     int read = rows.size();
     if(format != null && format.equalsIgnoreCase("d3")) {
-      List<Map<String,Object>> results = new ArrayList<>();
-      for(int i=0; i<rows.size(); i++) {
-        Object[] row = rows.get(i);
-        Map<String, Object> keyValue = new HashMap<>(row.length);
-        for(int j=0; j<row.length; j++) {
-          //Replace dots in schema with underscore
-          String schemaName = schema.get(j).getName();
-          keyValue.put(schemaName.replace('.','_'), row[j]);
-        }
-        results.add(keyValue);
-      }
+      List<Map<String, Object>> results = getD3FormattedResult(rows, schema);
       return Response.ok(results);
     } else {
-      ResultsResponse resultsResponse = new ResultsResponse();
-      resultsResponse.setSchema(schema);
-      resultsResponse.setRows(rows);
-      resultsResponse.setReadCount(read);
-      resultsResponse.setHasNext(resultSet.hasNext());
-      //      resultsResponse.setSize(resultSet.size());
-      resultsResponse.setOffset(resultSet.getOffset());
-      resultsResponse.setHasResults(true);
+      ResultsResponse resultsResponse = getResultsResponse(rows, schema, resultSet, read);
       return Response.ok(resultsResponse);
     }
   }
 
+  public List<Map<String, Object>> getD3FormattedResult(List<Object[]> rows, List<ColumnDescription> schema) {
+    List<Map<String,Object>> results = new ArrayList<>();
+    for(int i=0; i<rows.size(); i++) {
+      Object[] row = rows.get(i);
+      Map<String, Object> keyValue = new HashMap<>(row.length);
+      for(int j=0; j<row.length; j++) {
+        //Replace dots in schema with underscore
+        String schemaName = schema.get(j).getName();
+        keyValue.put(schemaName.replace('.','_'), row[j]);
+      }
+      results.add(keyValue);
+    }
+    return results;
+  }
+
+  public ResultsResponse getResultsResponse(List<Object[]> rows, List<ColumnDescription> schema, Cursor<Row, ColumnDescription> resultSet, int read) {
+    ResultsResponse resultsResponse = new ResultsResponse();
+    resultsResponse.setSchema(schema);
+    resultsResponse.setRows(rows);
+    resultsResponse.setReadCount(read);
+    resultsResponse.setHasNext(resultSet.hasNext());
+    //      resultsResponse.setSize(resultSet.size());
+    resultsResponse.setOffset(resultSet.getOffset());
+    resultsResponse.setHasResults(true);
+    return resultsResponse;
+  }
+
   private <T> List<T> filter(List<T> list, Set<Integer> selectedColumns) {
     List<T> filtered = Lists.newArrayList();
     for(int i: selectedColumns) {
@@ -202,7 +264,7 @@ public class ResultsPaginationController {
   }
 
   private Set<Integer> getRequestedColumns(String requestedColumns) {
-    if(Strings.isEmpty(requestedColumns)) {
+    if(Strings.isNullOrEmpty(requestedColumns)) {
       return new HashSet<>();
     }
     Set<Integer> selectedColumns = Sets.newHashSet();
@@ -216,7 +278,7 @@ public class ResultsPaginationController {
     return selectedColumns;
   }
 
-  private static class ResultsResponse {
+  public static class ResultsResponse {
     private List<ColumnDescription> schema;
     private List<String[]> rows;
     private int readCount;
@@ -283,4 +345,79 @@ public class ResultsPaginationController {
       this.hasResults = hasResults;
     }
   }
+
+  private class ResultProcessor {
+    private String key;
+    private String searchId;
+    private boolean canExpire;
+    private String fromBeginning;
+    private Integer count;
+    private String requestedColumns;
+    private Callable<Cursor<Row, ColumnDescription>> makeResultsSet;
+    private Cursor<Row, ColumnDescription> resultSet;
+    private List<ColumnDescription> schema;
+    private List<Object[]> rows;
+
+    public ResultProcessor(String key, String searchId, boolean canExpire, String fromBeginning, Integer count, String requestedColumns, Callable<Cursor<Row, ColumnDescription>> makeResultsSet) {
+      this.key = key;
+      this.searchId = searchId;
+      this.canExpire = canExpire;
+      this.fromBeginning = fromBeginning;
+      this.count = count;
+      this.requestedColumns = requestedColumns;
+      this.makeResultsSet = makeResultsSet;
+    }
+
+    public Cursor<Row, ColumnDescription> getResultSet() {
+      return resultSet;
+    }
+
+    public List<ColumnDescription> getSchema() {
+      return schema;
+    }
+
+    public List<Object[]> getRows() {
+      return rows;
+    }
+
+    public ResultProcessor invoke() {
+      if (searchId == null)
+        searchId = DEFAULT_SEARCH_ID;
+      key = key + "?" + searchId;
+      if (!canExpire)
+        key = "$" + key;
+      if (fromBeginning != null && fromBeginning.equals("true") && getResultsCache().containsKey(key)) {
+        getResultsCache().remove(key);
+      }
+
+      resultSet = getResultsSet(key, makeResultsSet);
+
+      if (count == null)
+        count = DEFAULT_FETCH_COUNT;
+
+      List<ColumnDescription> allschema = resultSet.getDescriptions();
+      List<Row> allRowEntries = FluentIterable.from(resultSet)
+        .limit(count).toList();
+
+      schema = allschema;
+
+      final Set<Integer> selectedColumns = getRequestedColumns(requestedColumns);
+      if (!selectedColumns.isEmpty()) {
+        schema = filter(allschema, selectedColumns);
+      }
+
+      rows = FluentIterable.from(allRowEntries)
+        .transform(new Function<Row, Object[]>() {
+          @Override
+          public Object[] apply(Row input) {
+            if (!selectedColumns.isEmpty()) {
+              return filter(Lists.newArrayList(input.getRow()), selectedColumns).toArray();
+            } else {
+              return input.getRow();
+            }
+          }
+        }).toList();
+      return this;
+    }
+  }
 }

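A sketch of consuming the new static helper, mirroring fetchColumnStats in
DDLProxy (jobId and context supplied by the caller; nulls fall back to the
defaults):

    ResultsPaginationController.ResultsResponse res =
        ResultsPaginationController.getResult(jobId, null, null, null, null, context);
    if (res.getHasResults()) {
      for (String[] row : res.getRows()) {
        // the first non-empty row carries the header cells of the
        // DESCRIBE FORMATTED output
      }
    }
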
http://git-wip-us.apache.org/repos/asf/ambari/blob/189fae52/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/jobs/viewJobs/JobImpl.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/jobs/viewJobs/JobImpl.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/jobs/viewJobs/JobImpl.java
index 85ffaf2..abb395d 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/jobs/viewJobs/JobImpl.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/resources/jobs/viewJobs/JobImpl.java
@@ -28,6 +28,10 @@ import java.util.Map;
  * Bean to represent saved query
  */
 public class JobImpl implements Job {
+  public enum REFERRER {
+    INTERNAL,
+    USER
+  }
   private String title = null;
   private String queryFile = null;
   private String statusDir = null;

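The enum's apparent intent: jobs the view submits on the user's behalf are
tagged INTERNAL (see createJob in DDLProxy above) so they can be distinguished
from user-submitted queries:

    jobInfo.put("referrer", JobImpl.REFERRER.INTERNAL.name());
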
http://git-wip-us.apache.org/repos/asf/ambari/blob/189fae52/contrib/views/hive20/src/test/rest/postman/hive20.postman_collection.json
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/test/rest/postman/hive20.postman_collection.json b/contrib/views/hive20/src/test/rest/postman/hive20.postman_collection.json
index d674944..4f78b59 100644
--- a/contrib/views/hive20/src/test/rest/postman/hive20.postman_collection.json
+++ b/contrib/views/hive20/src/test/rest/postman/hive20.postman_collection.json
@@ -19,7 +19,7 @@
 						"showPassword": false
 					}
 				},
-				"url": "{{APP_BASE_URL}}/resources/ddl/databases/d1/tables/t1/info?_=1481634018195",
+				"url": "{{APP_BASE_URL}}/resources/ddl/databases/default/tables/tt1/info?_=1481634018195",
 				"method": "GET",
 				"header": [
 					{
@@ -367,7 +367,7 @@
 						"showPassword": false
 					}
 				},
-				"url": "{{APP_BASE_URL}}/resources/jobs/202",
+				"url": "{{APP_BASE_URL}}/resources/jobs/257",
 				"method": "GET",
 				"header": [
 					{
@@ -437,6 +437,130 @@
 				"description": "drop database "
 			},
 			"response": []
+		},
+		{
+			"name": "fetch column stats",
+			"request": {
+				"auth": {
+					"type": "basic",
+					"basic": {
+						"username": "admin",
+						"password": "admin",
+						"saveHelperData": true,
+						"showPassword": false
+					}
+				},
+				"url": "{{APP_BASE_URL}}/resources/ddl/databases/default/tables/tt1/column/i/stats",
+				"method": "GET",
+				"header": [
+					{
+						"key": "X-Requested-By",
+						"value": "ambari",
+						"description": ""
+					},
+					{
+						"key": "Authorization",
+						"value": "Basic YWRtaW46YWRtaW4=",
+						"description": ""
+					}
+				],
+				"body": {},
+				"description": "fetch column stats"
+			},
+			"response": []
+		},
+		{
+			"name": "fetch job results",
+			"request": {
+				"auth": {
+					"type": "basic",
+					"basic": {
+						"username": "admin",
+						"password": "admin",
+						"saveHelperData": true,
+						"showPassword": false
+					}
+				},
+				"url": "{{APP_BASE_URL}}/resources/jobs/101/results?first=true&_=1484636273461",
+				"method": "GET",
+				"header": [
+					{
+						"key": "X-Requested-By",
+						"value": "ambari",
+						"description": ""
+					},
+					{
+						"key": "Authorization",
+						"value": "Basic YWRtaW46YWRtaW4=",
+						"description": ""
+					}
+				],
+				"body": {},
+				"description": "fetch job results"
+			},
+			"response": []
+		},
+		{
+			"name": "fetch column Stats result",
+			"request": {
+				"auth": {
+					"type": "basic",
+					"basic": {
+						"username": "admin",
+						"password": "admin",
+						"saveHelperData": true,
+						"showPassword": false
+					}
+				},
+				"url": "{{APP_BASE_URL}}/resources/ddl/databases/default/tables/tt1/column/i/fetch_stats?job_id=255",
+				"method": "GET",
+				"header": [
+					{
+						"key": "X-Requested-By",
+						"value": "ambari",
+						"description": ""
+					},
+					{
+						"key": "Authorization",
+						"value": "Basic YWRtaW46YWRtaW4=",
+						"description": ""
+					}
+				],
+				"body": {},
+				"description": "fetch column Stats result"
+			},
+			"response": []
+		},
+		{
+			"name": "analyze table",
+			"request": {
+				"auth": {
+					"type": "basic",
+					"basic": {
+						"username": "admin",
+						"password": "admin",
+						"saveHelperData": true,
+						"showPassword": false
+					}
+				},
+				"url": "{{APP_BASE_URL}}/resources/ddl/databases/default/tables/t1/analyze?analyze_columns=true",
+				"method": "PUT",
+				"header": [
+					{
+						"key": "X-Requested-By",
+						"value": "ambari",
+						"description": ""
+					},
+					{
+						"key": "Authorization",
+						"value": "Basic YWRtaW46YWRtaW4=",
+						"description": ""
+					}
+				],
+				"body": {},
+				"description": "analyze table"
+			},
+			"response": []
 		}
 	]
 }
\ No newline at end of file

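As a rough illustration of how the new requests in this collection map to plain HTTP, the following minimal Java sketch issues the "analyze table" call. The base URL, credentials, and table name are placeholder assumptions mirroring the Postman variables above, not values verified against a live Ambari instance:

    import java.io.IOException;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.util.Base64;

    // Minimal sketch of the "analyze table" request from the collection above.
    // The base URL, credentials, and table name are assumptions; substitute the
    // values for your own Hive View 2.0 instance.
    public class AnalyzeTableRequest {
      public static void main(String[] args) throws IOException {
        String appBaseUrl = "http://localhost:8080/api/v1/views/HIVE/versions/2.0.0/instances/AUTO_HIVE20_INSTANCE";
        URL url = new URL(appBaseUrl + "/resources/ddl/databases/default/tables/t1/analyze?analyze_columns=true");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("PUT");
        // The same headers the collection sends: CSRF guard plus basic auth.
        conn.setRequestProperty("X-Requested-By", "ambari");
        String token = Base64.getEncoder().encodeToString("admin:admin".getBytes("UTF-8"));
        conn.setRequestProperty("Authorization", "Basic " + token);
        System.out.println("HTTP " + conn.getResponseCode());
        conn.disconnect();
      }
    }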

[46/50] [abbrv] ambari git commit: AMBARI-19423. Change HostStackVersionResourceProvider to be able to install packages on single host not belonging to any cluster (magyari_sandor)

Posted by nc...@apache.org.
AMBARI-19423. Change HostStackVersionResourceProvider to be able to install packages on single host not belonging to any cluster (magyari_sandor)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f684c2b8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f684c2b8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f684c2b8

Branch: refs/heads/branch-dev-patch-upgrade
Commit: f684c2b874c54108e8a313361a0ee8f856ff7dee
Parents: ed93a56
Author: Sandor Magyari <sm...@hortonworks.com>
Authored: Wed Jan 4 13:44:25 2017 +0100
Committer: Sandor Magyari <sm...@hortonworks.com>
Committed: Wed Jan 18 11:56:40 2017 +0100

----------------------------------------------------------------------
 .../controller/AmbariActionExecutionHelper.java |  33 ++++--
 .../HostStackVersionResourceProvider.java       | 117 +++++++++++++++----
 .../HostStackVersionResourceProviderTest.java   |  96 +++++++++++++++
 3 files changed, 211 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f684c2b8/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index 4fa942f..ec0f7d0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@ -227,17 +227,28 @@ public class AmbariActionExecutionHelper {
             || targetHostType.equals(TargetHostType.MAJORITY);
   }
 
-
   /**
    * Add tasks to the stage based on the requested action execution
-   *
    * @param actionContext  the context associated with the action
    * @param stage          stage into which tasks must be inserted
    * @param requestParams  all request parameters (may be null)
    * @throws AmbariException if the task can not be added
    */
   public void addExecutionCommandsToStage(final ActionExecutionContext actionContext, Stage stage,
-                                          Map<String, String> requestParams)
+                                          Map<String, String> requestParams) throws AmbariException {
+    addExecutionCommandsToStage(actionContext, stage, requestParams, true);
+  }
+
+  /**
+   * Add tasks to the stage based on the requested action execution.
+   * @param actionContext  the context associated with the action
+   * @param stage          stage into which tasks must be inserted
+   * @param requestParams  all request parameters (may be null)
+   * @param checkHostIsMemberOfCluster if true, an AmbariException is thrown when a host is not a member of the cluster
+   * @throws AmbariException if the task cannot be added
+   */
+  public void addExecutionCommandsToStage(final ActionExecutionContext actionContext, Stage stage,
+                                          Map<String, String> requestParams, boolean checkHostIsMemberOfCluster)
       throws AmbariException {
 
     String actionName = actionContext.getActionName();
@@ -331,13 +342,15 @@ public class AmbariActionExecutionHelper {
               + "actionName=" + actionContext.getActionName());
     }
 
-    // Compare specified hosts to available hosts
-    if (!resourceFilter.getHostNames().isEmpty() && !candidateHosts.isEmpty()) {
-      for (String hostname : resourceFilter.getHostNames()) {
-        if (!candidateHosts.contains(hostname)) {
-          throw new AmbariException("Request specifies host " + hostname +
-            " but it is not a valid host based on the " +
-            "target service=" + serviceName + " and component=" + componentName);
+    if (checkHostIsMemberOfCluster) {
+      // Compare specified hosts to available hosts
+      if (!resourceFilter.getHostNames().isEmpty() && !candidateHosts.isEmpty()) {
+        for (String hostname : resourceFilter.getHostNames()) {
+          if (!candidateHosts.contains(hostname)) {
+            throw new AmbariException("Request specifies host " + hostname +
+              " but it is not a valid host based on the " +
+              "target service=" + serviceName + " and component=" + componentName);
+          }
         }
       }
     }

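To make the intent of the new boolean parameter concrete, a hypothetical call site might look like the fragment below; helper, actionContext, and stage stand in for objects the caller already holds, so this is an illustration of the flag's intent rather than compilable code:

    // Hypothetical call site: pass false as the last argument to skip the
    // cluster-membership check when the target host is deliberately not part
    // of any cluster yet (the existing 3-arg overload keeps the check on).
    boolean forceInstallOnNonMemberHost = true;
    helper.addExecutionCommandsToStage(actionContext, stage,
        null /* requestParams */, !forceInstallOnNonMemberHost);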
http://git-wip-us.apache.org/repos/asf/ambari/blob/f684c2b8/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
index b8d7381..364a61e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProvider.java
@@ -66,6 +66,7 @@ import org.apache.ambari.server.state.ServiceOsSpecific;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.repository.VersionDefinitionXml;
 import org.apache.ambari.server.utils.StageUtils;
+import org.apache.commons.lang.BooleanUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.Validate;
 
@@ -91,6 +92,18 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
   protected static final String HOST_STACK_VERSION_REPOSITORIES_PROPERTY_ID    = PropertyHelper.getPropertyId("HostStackVersions", "repositories");
   protected static final String HOST_STACK_VERSION_REPO_VERSION_PROPERTY_ID    = PropertyHelper.getPropertyId("HostStackVersions", "repository_version");
 
+  /**
+   * Whether to force creation of the install command on a host that is not yet a member of any cluster.
+   */
+  protected static final String HOST_STACK_VERSION_FORCE_INSTALL_ON_NON_MEMBER_HOST_PROPERTY_ID = PropertyHelper
+    .getPropertyId("HostStackVersions", "force_non_member_install");
+
+  /**
+   * When force_non_member_install is true, a list of component names must be provided in the request.
+   */
+  protected static final String HOST_STACK_VERSION_COMPONENT_NAMES_PROPERTY_ID = PropertyHelper.getPropertyId("HostStackVersions", "components");
+  protected static final String COMPONENT_NAME_PROPERTY_ID = "name";
+
   protected static final String INSTALL_PACKAGES_ACTION = "install_packages";
   protected static final String INSTALL_PACKAGES_FULL_NAME = "Install version";
 
@@ -111,7 +124,9 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
           HOST_STACK_VERSION_VERSION_PROPERTY_ID,
           HOST_STACK_VERSION_STATE_PROPERTY_ID,
           HOST_STACK_VERSION_REPOSITORIES_PROPERTY_ID,
-          HOST_STACK_VERSION_REPO_VERSION_PROPERTY_ID);
+          HOST_STACK_VERSION_REPO_VERSION_PROPERTY_ID,
+          HOST_STACK_VERSION_FORCE_INSTALL_ON_NON_MEMBER_HOST_PROPERTY_ID,
+          HOST_STACK_VERSION_COMPONENT_NAMES_PROPERTY_ID);
 
   private static Map<Type, String> keyPropertyIds = new HashMap<Type, String>() {
     {
@@ -261,9 +276,36 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
               String.format("The required property %s is not defined", requiredProperty));
     }
 
-    String clName = (String) propertyMap.get(HOST_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID);
+    String clName = (String) propertyMap.get (HOST_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID);
     hostName = (String) propertyMap.get(HOST_STACK_VERSION_HOST_NAME_PROPERTY_ID);
     desiredRepoVersion = (String) propertyMap.get(HOST_STACK_VERSION_REPO_VERSION_PROPERTY_ID);
+    stackName = (String) propertyMap.get(HOST_STACK_VERSION_STACK_PROPERTY_ID);
+    stackVersion = (String) propertyMap.get(HOST_STACK_VERSION_VERSION_PROPERTY_ID);
+
+    boolean forceInstallOnNonMemberHost = false;
+    Set<Map<String, String>> componentNames = null;
+    String forceInstallOnNonMemberHostString =
+      (String) propertyMap.get(HOST_STACK_VERSION_FORCE_INSTALL_ON_NON_MEMBER_HOST_PROPERTY_ID);
+
+    if (BooleanUtils.toBoolean(forceInstallOnNonMemberHostString)) {
+      forceInstallOnNonMemberHost = true;
+      componentNames = (Set<Map<String, String>>) propertyMap.get(HOST_STACK_VERSION_COMPONENT_NAMES_PROPERTY_ID);
+      if (componentNames == null) {
+        throw new IllegalArgumentException("When " + HOST_STACK_VERSION_FORCE_INSTALL_ON_NON_MEMBER_HOST_PROPERTY_ID + " is set to true, a list of " +
+          "components must be specified in the request.");
+      }
+    }
+
+    RequestStageContainer req = createInstallPackagesRequest(hostName, desiredRepoVersion, stackName, stackVersion,
+      clName, forceInstallOnNonMemberHost, componentNames);
+    return getRequestStatus(req.getRequestStatusResponse());
+  }
+
+  private RequestStageContainer createInstallPackagesRequest(String hostName, final String desiredRepoVersion,
+                                                             String stackName, String stackVersion, String clName,
+                                                             boolean forceInstallOnNonMemberHost,
+                                                             Set<Map<String, String>> componentNames)
+    throws NoSuchParentResourceException, SystemException {
 
     Host host;
     try {
@@ -275,8 +317,6 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
     AmbariManagementController managementController = getManagementController();
     AmbariMetaInfo ami = managementController.getAmbariMetaInfo();
 
-    stackName = (String) propertyMap.get(HOST_STACK_VERSION_STACK_PROPERTY_ID);
-    stackVersion = (String) propertyMap.get(HOST_STACK_VERSION_VERSION_PROPERTY_ID);
     final StackId stackId = new StackId(stackName, stackVersion);
     if (!ami.isSupportedStack(stackName, stackVersion)) {
       throw new NoSuchParentResourceException(String.format("Stack %s is not supported",
@@ -327,17 +367,19 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
 
     HostVersionEntity hostVersEntity = hostVersionDAO.findByClusterStackVersionAndHost(clName, stackId,
             desiredRepoVersion, hostName);
-    if (hostVersEntity == null) {
-      throw new IllegalArgumentException(String.format(
-        "Repo version %s for stack %s is not available for host %s",
-        desiredRepoVersion, stackId, hostName));
-    }
-    if (hostVersEntity.getState() != RepositoryVersionState.INSTALLED &&
-            hostVersEntity.getState() != RepositoryVersionState.INSTALL_FAILED &&
-            hostVersEntity.getState() != RepositoryVersionState.OUT_OF_SYNC) {
-      throw new UnsupportedOperationException(String.format("Repo version %s for stack %s " +
-        "for host %s is in %s state. Can not transition to INSTALLING state",
-              desiredRepoVersion, stackId, hostName, hostVersEntity.getState().toString()));
+    if (!forceInstallOnNonMemberHost) {
+      if (hostVersEntity == null) {
+        throw new IllegalArgumentException(String.format(
+          "Repo version %s for stack %s is not available for host %s",
+          desiredRepoVersion, stackId, hostName));
+      }
+      if (hostVersEntity.getState() != RepositoryVersionState.INSTALLED &&
+        hostVersEntity.getState() != RepositoryVersionState.INSTALL_FAILED &&
+        hostVersEntity.getState() != RepositoryVersionState.OUT_OF_SYNC) {
+        throw new UnsupportedOperationException(String.format("Repo version %s for stack %s " +
+            "for host %s is in %s state. Can not transition to INSTALLING state",
+          desiredRepoVersion, stackId, hostName, hostVersEntity.getState().toString()));
+      }
     }
 
     List<OperatingSystemEntity> operatingSystems = repoVersionEnt.getOperatingSystems();
@@ -357,9 +399,34 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
     // For every host at cluster, determine packages for all installed services
     List<ServiceOsSpecific.Package> packages = new ArrayList<>();
     Set<String> servicesOnHost = new HashSet<>();
-    List<ServiceComponentHost> components = cluster.getServiceComponentHosts(host.getHostName());
-    for (ServiceComponentHost component : components) {
-      servicesOnHost.add(component.getServiceName());
+
+    if (forceInstallOnNonMemberHost) {
+      for (Map<String, String> componentProperties : componentNames) {
+
+        String componentName = componentProperties.get(COMPONENT_NAME_PROPERTY_ID);
+        if (StringUtils.isEmpty(componentName)) {
+          throw new IllegalArgumentException("Components list contains a component with no 'name' property");
+        }
+
+        String serviceName = null;
+        try {
+          serviceName = ami.getComponentToService(stackName, stackVersion, componentName.trim().toUpperCase());
+          if (serviceName == null) {
+            throw new IllegalArgumentException("Service not found for component : " + componentName);
+          }
+          servicesOnHost.add(serviceName);
+        } catch (AmbariException e) {
+          LOG.error("Service not found for component {}!", componentName, e);
+          throw new IllegalArgumentException("Service not found for component : " + componentName);
+        }
+
+      }
+
+    } else {
+      List<ServiceComponentHost> components = cluster.getServiceComponentHosts(host.getHostName());
+      for (ServiceComponentHost component : components) {
+        servicesOnHost.add(component.getServiceName());
+      }
     }
     List<String> blacklistedPackagePrefixes = configuration.getRollingUpgradeSkipPackagesPrefixes();
     for (String serviceName : servicesOnHost) {
@@ -409,7 +476,6 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
       params.put(KeyNames.PACKAGE_VERSION, xml.getPackageVersion(osFamily));
     }
 
-
     // Create custom action
     RequestResourceFilter filter = new RequestResourceFilter(null, null,
             Collections.singletonList(hostName));
@@ -452,21 +518,22 @@ public class HostStackVersionResourceProvider extends AbstractControllerResource
     req.addStages(Collections.singletonList(stage));
 
     try {
-      actionExecutionHelper.get().addExecutionCommandsToStage(actionContext, stage, null);
+      actionExecutionHelper.get().addExecutionCommandsToStage(actionContext, stage, null, !forceInstallOnNonMemberHost);
     } catch (AmbariException e) {
       throw new SystemException("Can not modify stage", e);
     }
 
     try {
-      hostVersEntity.setState(RepositoryVersionState.INSTALLING);
-      hostVersionDAO.merge(hostVersEntity);
-
-      cluster.recalculateClusterVersionState(repoVersionEnt);
+      if (!forceInstallOnNonMemberHost) {
+        hostVersEntity.setState(RepositoryVersionState.INSTALLING);
+        hostVersionDAO.merge(hostVersEntity);
+        cluster.recalculateClusterVersionState(repoVersionEnt);
+      }
       req.persist();
     } catch (AmbariException e) {
       throw new SystemException("Can not persist request", e);
     }
-    return getRequestStatus(req.getRequestStatusResponse());
+    return req;
   }
 
 

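Distilled from the provider changes above, the validation contract for the new flag can be sketched as a small self-contained Java example. The names are simplified stand-ins for the provider's property ids, and the real code parses the flag with commons-lang BooleanUtils.toBoolean rather than Boolean.parseBoolean:

    import java.util.Map;
    import java.util.Set;

    // Sketch of the rule added above: force_non_member_install = true is only
    // valid when a components list accompanies the request.
    public class ForceInstallValidation {
      static void validate(String forceFlag, Set<Map<String, String>> components) {
        if (Boolean.parseBoolean(forceFlag) && components == null) {
          throw new IllegalArgumentException(
              "force_non_member_install=true requires a components list");
        }
      }

      public static void main(String[] args) {
        validate("false", null);   // fine: flag not set, no components needed
        try {
          validate("true", null);  // rejected: components list is missing
        } catch (IllegalArgumentException expected) {
          System.out.println(expected.getMessage());
        }
      }
    }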
http://git-wip-us.apache.org/repos/asf/ambari/blob/f684c2b8/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProviderTest.java
index b7fd051..57b77b4 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostStackVersionResourceProviderTest.java
@@ -32,6 +32,7 @@ import static org.easymock.EasyMock.verify;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
 import java.util.List;
@@ -292,6 +293,101 @@ public class HostStackVersionResourceProviderTest {
   }
 
   @Test
+  public void testCreateResources_on_host_not_belonging_To_any_cluster() throws Exception {
+    StackId stackId = new StackId("HDP", "2.0.1");
+
+    final Host host1 = createNiceMock("host1", Host.class);
+    expect(host1.getHostName()).andReturn("host1").anyTimes();
+    expect(host1.getOsFamily()).andReturn("redhat6").anyTimes();
+    replay(host1);
+    Map<String, Host> hostsForCluster = new HashMap<>();
+
+    ServiceComponentHost sch = createMock(ServiceComponentHost.class);
+
+    final ServiceOsSpecific.Package hivePackage = new ServiceOsSpecific.Package();
+    hivePackage.setName("hive");
+    final ServiceOsSpecific.Package mysqlPackage = new ServiceOsSpecific.Package();
+    mysqlPackage.setName("mysql");
+    mysqlPackage.setSkipUpgrade(Boolean.TRUE);
+    List<ServiceOsSpecific.Package> packages = Arrays.asList(hivePackage, mysqlPackage);
+
+    AbstractControllerResourceProvider.init(resourceProviderFactory);
+
+    Map<String, Map<String, String>> hostConfigTags = new HashMap<>();
+    expect(configHelper.getEffectiveDesiredTags(anyObject(ClusterImpl.class), anyObject(String.class))).andReturn(hostConfigTags);
+
+    expect(managementController.getClusters()).andReturn(clusters).anyTimes();
+    expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
+    expect(managementController.getActionManager()).andReturn(actionManager).anyTimes();
+    expect(managementController.getJdkResourceUrl()).andReturn("/JdkResourceUrl").anyTimes();
+    expect(managementController.getPackagesForServiceHost(anyObject(ServiceInfo.class),
+      anyObject(Map.class), anyObject(String.class))).andReturn(packages).anyTimes();
+
+    expect(resourceProviderFactory.getHostResourceProvider(anyObject(Set.class), anyObject(Map.class),
+      eq(managementController))).andReturn(csvResourceProvider).anyTimes();
+
+    expect(clusters.getCluster(anyObject(String.class))).andReturn(cluster);
+    expect(clusters.getHost(anyObject(String.class))).andReturn(host1);
+    expect(cluster.getHosts()).andReturn(hostsForCluster.values()).atLeastOnce();
+    expect(cluster.getServices()).andReturn(new HashMap<String, Service>()).anyTimes();
+    expect(cluster.getCurrentStackVersion()).andReturn(stackId);
+
+    expect(
+      repositoryVersionDAOMock.findByStackAndVersion(
+        anyObject(StackId.class),
+        anyObject(String.class))).andReturn(repoVersion);
+
+    expect(actionManager.getRequestTasks(anyLong())).andReturn(Collections.<HostRoleCommand>emptyList()).anyTimes();
+
+    StageUtils.setTopologyManager(injector.getInstance(TopologyManager.class));
+    StageUtils.setConfiguration(injector.getInstance(Configuration.class));
+
+    // replay
+    replay(managementController, response, clusters, resourceProviderFactory, csvResourceProvider,
+      cluster, repositoryVersionDAOMock, configHelper, sch, actionManager, hostVersionEntityMock, hostVersionDAOMock);
+
+    ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider(
+      type,
+      PropertyHelper.getPropertyIds(type),
+      PropertyHelper.getKeyPropertyIds(type),
+      managementController);
+
+    injector.injectMembers(provider);
+
+    // add the property map to a set for the request.  add more maps for multiple creates
+    Set<Map<String, Object>> propertySet = new LinkedHashSet<>();
+
+    Map<String, Object> properties = new LinkedHashMap<>();
+
+    // add properties to the request map
+    properties.put(HostStackVersionResourceProvider.HOST_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
+    properties.put(HostStackVersionResourceProvider.HOST_STACK_VERSION_REPO_VERSION_PROPERTY_ID, "2.2.0.1-885");
+    properties.put(HostStackVersionResourceProvider.HOST_STACK_VERSION_STACK_PROPERTY_ID, "HDP");
+    properties.put(HostStackVersionResourceProvider.HOST_STACK_VERSION_VERSION_PROPERTY_ID, "2.0.1");
+    properties.put(HostStackVersionResourceProvider.HOST_STACK_VERSION_HOST_NAME_PROPERTY_ID, "host1");
+    Set<Map<String, String>> components = new HashSet<>();
+    Map<String, String> hiveMetastoreComponent = new HashMap<>();
+    hiveMetastoreComponent.put("name", "HIVE_METASTORE");
+    components.add(hiveMetastoreComponent);
+    Map<String, String> hiveServerComponent = new HashMap<>();
+    hiveServerComponent.put("name", "HIVE_SERVER");
+    components.add(hiveServerComponent);
+    properties.put(HostStackVersionResourceProvider.HOST_STACK_VERSION_COMPONENT_NAMES_PROPERTY_ID, components);
+    properties.put(HostStackVersionResourceProvider.HOST_STACK_VERSION_FORCE_INSTALL_ON_NON_MEMBER_HOST_PROPERTY_ID,
+      "true");
+
+    propertySet.add(properties);
+
+    // create the request
+    Request request = PropertyHelper.getCreateRequest(propertySet, null);
+
+    provider.createResources(request);
+
+    // verify
+    verify(managementController, response, clusters);
+  }
+
+  @Test
   public void testCreateResources_in_out_of_sync_state() throws Exception {
     StackId stackId = new StackId("HDP", "2.0.1");
 


[36/50] [abbrv] ambari git commit: AMBARI-19541. Add log rotation settings - handle HDP upgrade scenario (Madhuvanthi Radhakrishnan via smohanty)

Posted by nc...@apache.org.
AMBARI-19541. Add log rotation settings - handle HDP upgrade scenario (Madhuvanthi Radhakrishnan via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4dac2783
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4dac2783
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4dac2783

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 4dac27831c2498e12fff05d33a95444e3abffdff
Parents: b2ba7dd
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Tue Jan 17 14:15:57 2017 -0800
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Tue Jan 17 14:15:57 2017 -0800

----------------------------------------------------------------------
 .../upgrade/ConfigUpgradeChangeDefinition.java  | 110 ++++++++++-
 .../state/stack/upgrade/ConfigureTask.java      |   9 +-
 .../stacks/HDP/2.3/upgrades/config-upgrade.xml  | 148 +++++++++++++++
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml |  90 +++++++++
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml |  88 ++++++++-
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml |  91 ++++++++-
 .../stacks/HDP/2.3/upgrades/upgrade-2.4.xml     |  52 ++++-
 .../stacks/HDP/2.3/upgrades/upgrade-2.5.xml     |  43 ++++-
 .../stacks/HDP/2.4/upgrades/config-upgrade.xml  | 174 ++++++++++++++++-
 .../HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml |  97 +++++++++-
 .../HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml |  87 ++++++++-
 .../stacks/HDP/2.4/upgrades/upgrade-2.5.xml     |  40 +++-
 .../stacks/HDP/2.4/upgrades/upgrade-2.6.xml     |  40 +++-
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml  | 190 ++++++++++++++++++-
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml |  95 ++++++++++
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml     |  41 ++++
 .../src/main/resources/upgrade-config.xsd       |  14 +-
 17 files changed, 1394 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4dac2783/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigUpgradeChangeDefinition.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigUpgradeChangeDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigUpgradeChangeDefinition.java
index 5428ea7..31df790 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigUpgradeChangeDefinition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigUpgradeChangeDefinition.java
@@ -20,6 +20,9 @@ package org.apache.ambari.server.state.stack.upgrade;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
@@ -28,6 +31,8 @@ import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
 import javax.xml.bind.annotation.XmlType;
 
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Config;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -130,6 +135,9 @@ public class ConfigUpgradeChangeDefinition {
   @XmlElement(name="replace")
   private List<Replace> replacements;
 
+  @XmlElement(name="regex-replace")
+  private List<RegexReplace> regexReplacements;
+
   /**
    * @return the config type
    */
@@ -198,6 +206,50 @@ public class ConfigUpgradeChangeDefinition {
   }
 
   /**
+   * @return the replacement tokens built from the regex matches, never {@code null}
+   */
+  public List<Replace> getRegexReplacements(Cluster cluster) {
+    if (null == regexReplacements) {
+      return Collections.emptyList();
+    }
+
+    List<Replace> list = new ArrayList<>();
+    for (RegexReplace regexReplaceObj : regexReplacements) {
+      if (null == regexReplaceObj.key || null == regexReplaceObj.find || null == regexReplaceObj.replaceWith) {
+        LOG.warn(String.format("Replacement %s is invalid", regexReplaceObj));
+        continue;
+      }
+
+      try {
+        Config config = cluster.getDesiredConfigByType(configType);
+        Map<String, String> properties = config.getProperties();
+        String content = properties.get(regexReplaceObj.key);
+
+        Pattern pattern = Pattern.compile(regexReplaceObj.find, Pattern.MULTILINE);
+        Matcher matcher = pattern.matcher(content);
+        if (matcher.find() && matcher.groupCount() == 1) {
+          regexReplaceObj.find = matcher.group();
+          list.add(regexReplaceObj.copyToReplaceObject());
+        }
+      } catch (Exception e) {
+        LOG.error("ConfigUpgradeChangeDefinition: getRegexReplacements: error while fetching config properties", e);
+      }
+    }
+    return list;
+  }
+
+
+  /**
    * Used for configuration updates that should mask their values from being
    * printed in plain text.
    */
@@ -379,4 +431,60 @@ public class ConfigUpgradeChangeDefinition {
               '}';
     }
   }
-}
\ No newline at end of file
+
+  /**
+   * Used to replace strings in a key's value with other strings, using a
+   * regular expression to locate the text to replace.
+   */
+  @XmlAccessorType(XmlAccessType.FIELD)
+  @XmlType(name = "regex-replace")
+  public static class RegexReplace extends Masked {
+    /**
+     * The key name
+     */
+    @XmlAttribute(name="key")
+    public String key;
+
+    /**
+     * The regular expression used to find the text to replace
+     */
+    @XmlAttribute(name="find")
+    public String find;
+
+    /**
+     * The replacement string
+     */
+    @XmlAttribute(name="replace-with")
+    public String replaceWith;
+
+    @Override
+    public String toString() {
+      return "RegexReplace{" +
+              "key='" + key + '\'' +
+              ", find='" + find + '\'' +
+              ", replaceWith='" + replaceWith + '\'' +
+              ", ifKey='" + ifKey + '\'' +
+              ", ifType='" + ifType + '\'' +
+              ", ifValue='" + ifValue + '\'' +
+              ", ifKeyState='" + ifKeyState + '\'' +
+              '}';
+    }
+
+    /**
+     * Copies this RegexReplace object into an equivalent Replace object.
+     * @return the equivalent Replace object
+     */
+    public Replace copyToReplaceObject() {
+      Replace rep = new Replace();
+      rep.find = this.find;
+      rep.key = this.key;
+      rep.replaceWith = this.replaceWith;
+      rep.ifKey = this.ifKey;
+      rep.ifType = this.ifType;
+      rep.ifValue = this.ifValue;
+      rep.ifKeyState = this.ifKeyState;
+
+      return rep;
+    }
+  }
+}

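The capture-group contract in getRegexReplacements() is easy to miss, so here is a standalone sketch of the same mechanics on hypothetical log4j content: the regex (with exactly one capture group) locates the current value, and the full matched text then becomes the literal find string for an ordinary Replace:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Standalone illustration of the regex-replace mechanics; the sample
    // content is hypothetical, not taken from a real stack definition.
    public class RegexReplaceDemo {
      public static void main(String[] args) {
        String content = "hbase.log.maxfilesize=512MB\nhbase.log.maxbackupindex=30\n";
        Pattern p = Pattern.compile("hbase.log.maxfilesize=([0-9]+)MB", Pattern.MULTILINE);
        Matcher m = p.matcher(content);
        if (m.find() && m.groupCount() == 1) {
          String literalFind = m.group();  // "hbase.log.maxfilesize=512MB"
          String replaceWith = "hbase.log.maxfilesize={{hbase_log_maxfilesize}}MB";
          // The parameterized token replaces whatever value was there before.
          System.out.println(content.replace(literalFind, replaceWith));
        }
      }
    }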
http://git-wip-us.apache.org/repos/asf/ambari/blob/4dac2783/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigureTask.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigureTask.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigureTask.java
index d7bb338..f256eb0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigureTask.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigureTask.java
@@ -211,7 +211,12 @@ public class ConfigureTask extends ServerSideActionTask {
     }
 
     // replacements
-    List<Replace> replacements = definition.getReplacements();
+
+    List<Replace> replacements = new ArrayList<Replace>();
+    replacements.addAll(definition.getReplacements());
+    // Also fetch the replacements that use a regex to locate the target string
+    replacements.addAll(definition.getRegexReplacements(cluster));
+
     if( null != replacements && !replacements.isEmpty() ){
       List<Replace> allowedReplacements = getValidReplacements(cluster, definition.getConfigType(), replacements);
       configParameters.put(ConfigureTask.PARAMETER_REPLACEMENTS, m_gson.toJson(allowedReplacements));
@@ -319,4 +324,4 @@ public class ConfigureTask extends ServerSideActionTask {
 
     return config.getProperties().get(propertyKey);
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/4dac2783/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
index fd821aa..c9adb8c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
@@ -64,6 +64,19 @@
             <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
           </definition>
 
+          <!-- HBase Rolling properties for log4j need to be parameterized. -->
+          <definition xsi:type="configure" id="hbase_log4j_parameterize" summary="Parameterizing HBase Log4J Properties">
+            <type>hbase-log4j</type>
+            <set key="hbase_log_maxfilesize" value="256"/>
+            <set key="hbase_log_maxbackupindex" value="20"/>
+            <set key="hbase_security_log_maxfilesize" value="256"/>
+            <set key="hbase_security_log_maxbackupindex" value="20"/>
+            <regex-replace key="content" find="hbase.log.maxfilesize=([0-9]+)MB" replace-with="hbase.log.maxfilesize={{hbase_log_maxfilesize}}MB"/>
+            <regex-replace key="content" find="hbase.log.maxbackupindex=([0-9]+)" replace-with="hbase.log.maxbackupindex={{hbase_log_maxbackupindex}}"/>
+            <regex-replace key="content" find="hbase.security.log.maxfilesize=([0-9]+)MB" replace-with="hbase.security.log.maxfilesize={{hbase_security_log_maxfilesize}}MB"/>
+            <regex-replace key="content" find="hbase.security.log.maxbackupindex=([0-9]+)" replace-with="hbase.security.log.maxbackupindex={{hbase_security_log_maxbackupindex}}"/>
+          </definition>
+
         </changes>
       </component>
     </service>
@@ -232,9 +245,40 @@
             <type>ranger-env</type>
             <transfer operation="delete" delete-key="bind_anonymous" />
           </definition>
+          <definition xsi:type="configure" id="admin_log4j_parameterize" summary="Parameterizing Ranger Log4J Properties">
+            <type>admin-log4j</type>
+            <set key="ranger_xa_log_maxfilesize" value="256"/>
+            <set key="ranger_xa_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.xa_log_appender.MaxFileSize={{ranger_xa_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.xa_log_appender.MaxBackupIndex={{ranger_xa_log_maxbackupindex}}"/>
+          </definition>
 
         </changes>
       </component>
+
+      <component name="RANGER_USERSYNC">
+        <changes>
+          <definition xsi:type="configure" id="usersync_log4j_parameterize" summary="Parameterizing Ranger Usersync Log4J Properties">
+            <type>usersync-log4j</type>
+            <set key="ranger_usersync_log_maxfilesize" value="256"/>
+            <set key="ranger_usersync_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxFileSize = {{ranger_usersync_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxBackupIndex = {{ranger_usersync_log_maxbackupindex}}"/>
+          </definition>
+        </changes>
+      </component>
+
+      <component name="RANGER_TAGSYNC">
+        <changes>
+          <definition xsi:type="configure" id="tagsync_log4j_parameterize" summary="Parameterizing Ranger Tagsync Log4J Properties">
+            <type>tagsync-log4j</type>
+            <set key="ranger_tagsync_log_maxfilesize" value="256"/>
+            <set key="ranger_tagsync_log_number_of_backup_files" value="20"/>
+            <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxFileSize = {{ranger_tagsync_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxBackupIndex = {{ranger_tagsync_log_number_of_backup_files}}"/>
+          </definition>
+        </changes>
+      </component>
     </service>
 
     <service name="RANGER_KMS">
@@ -250,6 +294,17 @@
             <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
             <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
           </definition>
+          <definition xsi:type="configure" id="kms_log4j_parameterize" summary="Parameterizing Ranger KMS Log4J Properties">
+            <type>kms-log4j</type>
+            <set key="ranger_kms_log_maxfilesize" value="256"/>
+            <set key="ranger_kms_log_maxbackupindex" value="20"/>
+            <set key="ranger_kms_audit_log_maxfilesize" value="256"/>
+            <set key="ranger_kms_audit_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms.MaxFileSize = {{ranger_kms_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms.MaxBackupIndex = {{ranger_kms_log_maxbackupindex}}"/>
+            <replace key="content" find="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms-audit.MaxFileSize = {{ranger_kms_audit_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms-audit.MaxBackupIndex = {{ranger_kms_audit_log_maxbackupindex}}"/>
+          </definition>
         </changes>
       </component>
     </service>
@@ -274,6 +329,19 @@
             <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
             <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
           </definition>
+
+          <!-- HDFS Rolling properties for log4j need to be parameterized. -->
+          <definition xsi:type="configure" id="hdfs_log4j_parameterize" summary="Parameterizing Hdfs Log4J Properties">
+            <type>hdfs-log4j</type>
+            <set key="hadoop_log_max_backup_size" value="256"/>
+            <set key="hadoop_log_number_of_backup_files" value="10"/>
+            <set key="hadoop_security_log_max_backup_size" value="256"/>
+            <set key="hadoop_security_log_number_of_backup_files" value="20"/>
+            <regex-replace  key="content" find="log4j.appender.RFA.MaxFileSize=([0-9]+)MB" replace-with="log4j.appender.RFA.MaxFileSize={{hadoop_log_max_backup_size}}MB"/>
+            <regex-replace  key="content" find="log4j.appender.RFA.MaxBackupIndex=([0-9]+)" replace-with="log4j.appender.RFA.MaxBackupIndex={{hadoop_log_number_of_backup_files}}"/>
+            <regex-replace  key="content" find="hadoop.security.log.maxfilesize=([0-9]+)MB" replace-with="hadoop.security.log.maxfilesize={{hadoop_security_log_max_backup_size}}MB"/>
+            <regex-replace  key="content" find="hadoop.security.log.maxbackupindex=([0-9]+)" replace-with="hadoop.security.log.maxbackupindex={{hadoop_security_log_number_of_backup_files}}"/>
+          </definition>
         </changes>
       </component>
     </service>
@@ -337,6 +405,12 @@
             <type>oozie-site</type>
             <replace key="oozie.services" find="org.apache.oozie.service.CoordinatorStoreService," replace-with="" />
           </definition>
+          <!-- Oozie Rolling properties for log4j need to be parameterized. -->
+          <definition xsi:type="configure" id="oozie_log4j_parameterize" summary="Parameterizing Oozie Log4J Properties">
+            <type>oozie-log4j</type>
+            <set key="oozie_log_maxhistory" value="720"/>
+            <regex-replace key="content" find="^log4j.appender.oozie.RollingPolicy.MaxHistory=([0-9]+)" replace-with="log4j.appender.oozie.RollingPolicy.MaxHistory={{oozie_log_maxhistory}}"/>
+          </definition>
         </changes>
       </component>
     </service>
@@ -359,6 +433,17 @@
             <set key="inter.broker.protocol.version" value="0.9.0.0" />
             <set key="log.message.format.version" value="0.9.0.0" />
           </definition>
+          <definition xsi:type="configure" id="kafka_log4j_parameterize" summary="Parameterizing Kafka Log4J Properties">
+            <type>kafka-log4j</type>
+            <set key="kafka_log_maxfilesize" value="256"/>
+            <set key="kafka_log_maxbackupindex" value="20"/>
+            <set key="controller_log_maxfilesize" value="256"/>
+            <set key="controller_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kafkaAppender.MaxFileSize = {{kafka_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kafkaAppender.MaxBackupIndex = {{kafka_log_maxbackupindex}}"/>
+            <replace key="content" find="log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.controllerAppender.MaxFileSize = {{controller_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.controllerAppender.MaxBackupIndex = {{controller_log_maxbackupindex}}"/>
+          </definition>
         </changes>
       </component>
     </service>
@@ -376,6 +461,14 @@
             <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
             <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
           </definition>
+          <!-- Yarn Rolling properties for log4j need to be parameterized. -->
+          <definition xsi:type="configure" id="yarn_log4j_parameterize" summary="Parameterizing Yarn Log4J Properties">
+            <type>yarn-log4j</type>
+            <set key="yarn_rm_summary_log_max_backup_size" value="256"/>
+            <set key="yarn_rm_summary_log_number_of_backup_files" value="20"/>
+            <regex-replace key="content" find="^log4j.appender.RMSUMMARY.MaxFileSize=([0-9]+)MB" replace-with="log4j.appender.RMSUMMARY.MaxFileSize={{yarn_rm_summary_log_max_backup_size}}MB"/>
+            <regex-replace key="content" find="^log4j.appender.RMSUMMARY.MaxBackupIndex=([0-9]+)" replace-with="log4j.appender.RMSUMMARY.MaxBackupIndex={{yarn_rm_summary_log_number_of_backup_files}}"/>
+          </definition>
         </changes>
       </component>
     </service>
@@ -471,6 +564,20 @@
             <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
             <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
           </definition>
+          <definition xsi:type="configure" id="knox_gateway_log4j_parameterize" summary="Parameterizing Knox Gateway Log4J Properties">
+            <type>gateway-log4j</type>
+            <set key="knox_gateway_log_maxfilesize" value="256"/>
+            <set key="knox_gateway_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxFileSize = {{knox_gateway_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxBackupIndex = {{knox_gateway_log_maxbackupindex}}"/>
+          </definition>
+          <definition xsi:type="configure" id="knox_ldap_log4j_parameterize" summary="Parameterizing Knox Ldap Log4J Properties">
+            <type>ldap-log4j</type>
+            <set key="knox_ldap_log_maxfilesize" value="256"/>
+            <set key="knox_ldap_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxFileSize = {{knox_ldap_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxBackupIndex = {{knox_ldap_log_maxbackupindex}}"/>
+          </definition>
         </changes>
       </component>
     </service>
@@ -482,8 +589,49 @@
             <type>falcon-startup.properties</type>
             <set key="*.application.services" value="org.apache.falcon.security.AuthenticationInitializationService, org.apache.falcon.workflow.WorkflowJobEndNotificationService, org.apache.falcon.service.ProcessSubscriberService, org.apache.falcon.extensions.ExtensionService, org.apache.falcon.service.LifecyclePolicyMap, org.apache.falcon.entity.store.ConfigurationStore, org.apache.falcon.rerun.service.RetryService, org.apache.falcon.rerun.service.LateRunService, org.apache.falcon.service.LogCleanupService, org.apache.falcon.metadata.MetadataMappingService{{atlas_application_class_addition}}"/>
           </definition>
+          <definition xsi:type="configure" id="falcon_log4j_parameterize" summary="Parameterizing Falcon Log4J Properties">
+            <type>falcon-log4j</type>
+            <set key="falcon_log_maxfilesize" value="256"/>
+            <set key="falcon_log_maxbackupindex" value="20"/>
+            <set key="falcon_security_log_maxfilesize" value="256"/>
+            <set key="falcon_security_log_maxbackupindex" value="20"/>
+            <replace key="content" find="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;&#xA;&lt;param name=&quot;MaxFileSize&quot; value=&quot;{{falcon_log_maxfilesize}}MB&quot; /&gt;"/>
+            <replace key="content" find="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;&#xA;&lt;param name=&quot;MaxBackupIndex&quot; value=&quot;{{falcon_log_maxbackupindex}}&quot; /&gt;"/>
+            <replace key="content" find="&lt;appender name=&quot;SECURITY&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;SECURITY&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;&#xA;&lt;param name=&quot;MaxFileSize&quot; value=&quot;{{falcon_security_log_maxfilesize}}MB&quot;/&gt;"/>
+            <replace key="content" find="&lt;appender name=&quot;SECURITY&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;SECURITY&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;&#xA;&lt;param name=&quot;MaxBackupIndex&quot; value=&quot;{{falcon_security_log_maxbackupindex}}&quot;/&gt;"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="ATLAS">
+      <component name="ATLAS_SERVER">
+        <changes>
+          <definition xsi:type="configure" id="atlas_log4j_parameterize" summary="Parameterizing Atlas Log4J Properties">
+            <type>atlas-log4j</type>
+            <set key="atlas_log_max_backup_size" value="256"/>
+            <set key="atlas_log_number_of_backup_files" value="20"/>
+            <replace key="content" find="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;\n&lt;param name=&quot;MaxFileSize&quot; value=&quot;{{atlas_log_max_backup_size}}MB&quot; /&gt;"/>
+            <replace key="content" find="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;\n&lt;param name=&quot;MaxFileSize&quot; value=&quot;{{atlas_log_number_of_backup_files}}&quot; /&gt;"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="ZOOKEEPER">
+      <component name="ZOOKEEPER_SERVER">
+        <changes>
+          <!-- Zookeeper Rolling properties for log4j need to be parameterized. -->
+          <definition xsi:type="configure" id="zookeeper_log4j_parameterize" summary="Parameterizing ZooKeeper Log4J Properties">
+            <type>zookeeper-log4j</type>
+            <set key="zookeeper_log_max_backup_size" value="10"/>
+            <set key="zookeeper_log_number_of_backup_files" value="10"/>
+            <regex-replace  key="content" find="^log4j.appender.ROLLINGFILE.MaxFileSize=([0-9]+)MB" replace-with="log4j.appender.ROLLINGFILE.MaxFileSize={{zookeeper_log_max_backup_size}}MB"/>
+            <regex-replace key="content" find="^#log4j.appender.ROLLINGFILE.MaxBackupIndex=([0-9]+)" replace-with="#log4j.appender.ROLLINGFILE.MaxBackupIndex={{zookeeper_log_number_of_backup_files}}"/>
+          </definition>
         </changes>
       </component>
     </service>
+
   </services>
 </upgrade-config-changes>

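A recurring pattern in the definitions above is a plain <replace> whose replacement repeats the find text and appends one new property after it, which is why the same find string appears twice per appender. A minimal Java sketch of those string semantics (assuming simple literal replacement; the server-side task may differ in details):

    // Each replace re-inserts the appender line and tacks one property onto it,
    // so applying two replaces with the same find yields both properties
    // directly below the appender line. Values here are illustrative.
    public class AppenderReplaceDemo {
      public static void main(String[] args) {
        String find = "log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender";
        String content = find + "\n";
        content = content.replace(find,
            find + "\nlog4j.appender.kms.MaxFileSize = {{ranger_kms_log_maxfilesize}}MB");
        content = content.replace(find,
            find + "\nlog4j.appender.kms.MaxBackupIndex = {{ranger_kms_log_maxbackupindex}}");
        System.out.println(content);
      }
    }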
http://git-wip-us.apache.org/repos/asf/ambari/blob/4dac2783/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
index 8dff078..d674af7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml
@@ -258,6 +258,12 @@
         <task xsi:type="configure" id="hdp_2_4_0_0_namenode_ha_adjustments"/>
       </execute-stage>
 
+      <execute-stage service="HDFS" component="NAMENODE" title="Parameterizing Hdfs Log4J Properties">
+        <task xsi:type="configure" id="hdfs_log4j_parameterize">
+          <summary>Updating the Hdfs Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!-- YARN -->
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Calculating Yarn Properties for Spark">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.SparkShufflePropertyConfig">
@@ -265,6 +271,13 @@
         </task>
       </execute-stage>
 
+      <!--Yarn-->
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Parameterizing Yarn Log4J Properties Resource Manager">
+        <task xsi:type="configure" id="yarn_log4j_parameterize">
+          <summary>Updating the Yarn Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Apply config changes for Mapreduce2 client">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath">
           <summary>Verifying LZO codec path for mapreduce</summary>
@@ -277,6 +290,12 @@
         <task xsi:type="configure" id="hdp_2_4_0_0_hbase_remove_local_indexing"/>
       </execute-stage>
 
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Parameterizing HBase Log4J Properties">
+        <task xsi:type="configure" id="hbase_log4j_parameterize">
+          <summary>Updating the Hbase Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <execute-stage service="HBASE" component="HBASE_MASTER" title="Update HBase Configuration">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.HBaseEnvMaxDirectMemorySizeAction">
           <summary>Update HBase Env Configuration</summary>
@@ -310,6 +329,12 @@
         </task>
       </execute-stage>
 
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Parameterizing Oozie Log4J Properties">
+        <task xsi:type="configure" id="oozie_log4j_parameterize">
+          <summary>Updating the Oozie Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Apply config changes for Oozie Server">
         <task xsi:type="configure" id="hdp_2_4_0_0_oozie_remove_service_classes" />
       </execute-stage>
@@ -342,6 +367,71 @@
       <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus">
         <task xsi:type="configure" id="increase_storm_zookeeper_timeouts"/>
       </execute-stage>
+
+     <!--ZOOKEEPER-->
+      <execute-stage service="ZOOKEEPER" component="ZOOKEEPER_SERVER" title="Parameterizing Zookeeper Log4J Properties">
+        <task xsi:type="configure" id="zookeeper_log4j_parameterize">
+          <summary>Updating the Zookeeper Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--ATLAS-->
+      <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Parameterizing Atlas Log4J Properties">
+        <task xsi:type="configure" id="atlas_log4j_parameterize">
+          <summary>Updating the Atlas Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--KAFKA-->
+      <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Parameterizing Kafka Log4J Properties">
+        <task xsi:type="configure" id="kafka_log4j_parameterize">
+          <summary>Updating the Kafka Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--RANGER-->
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Parameterizing Ranger Admin Log4J Properties">
+        <task xsi:type="configure" id="admin_log4j_parameterize">
+          <summary>Updating the Ranger admin Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+      <execute-stage service="RANGER" component="RANGER_USERSYNC" title="Parameterizing Ranger Usersync Log4J Properties">
+        <task xsi:type="configure" id="usersync_log4j_parameterize">
+          <summary>Updating the Ranger usersync Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+      <execute-stage service="RANGER" component="RANGER_TAGSYNC" title="Parameterizing Ranger Tagsync Log4J Properties">
+        <task xsi:type="configure" id="tagsync_log4j_parameterize">
+          <summary>Updating the Ranger tagsync Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--RANGER-KMS-->
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Parameterizing Ranger Kms Log4J Properties">
+        <task xsi:type="configure" id="kms_log4j_parameterize">
+          <summary>Updating the KMS Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--KNOX-->
+      <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Parameterizing Knox Gateway Log4J Properties">
+        <task xsi:type="configure" id="knox_gateway_log4j_parameterize">
+          <summary>Updating the Knox Gateway Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+      <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Parameterizing Knox Ldap Log4J Properties">
+        <task xsi:type="configure" id="knox_ldap_log4j_parameterize">
+          <summary>Updating the Knox Ldap Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--FALCON-->
+      <execute-stage service="FALCON" component="FALCON_SERVER" title="Parameterizing Falcon Log4J Properties">
+        <task xsi:type="configure" id="falcon_log4j_parameterize">
+          <summary>Updating the Falcon Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
     </group>
 
     <!--

http://git-wip-us.apache.org/repos/asf/ambari/blob/4dac2783/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
index 11cd31a..7a05c99 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml
@@ -279,6 +279,12 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hdfs_audit_db"/>
       </execute-stage>
 
+      <execute-stage service="HDFS" component="NAMENODE" title="Parameterizing Hdfs Log4J Properties">
+        <task xsi:type="configure" id="hdfs_log4j_parameterize">
+          <summary>Updating the Hdfs Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!-- YARN -->
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Calculating Yarn Properties for Spark">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.SparkShufflePropertyConfig">
@@ -290,6 +296,12 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_yarn_audit_db"/>
       </execute-stage>
 
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Parameterizing Yarn Log4J Properties Resource Manager">
+        <task xsi:type="configure" id="yarn_log4j_parameterize">
+          <summary>Updating the Yarn Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <execute-stage service="YARN" component="NODEMANAGER" title="Add Spark2 shuffle">
         <task xsi:type="configure" id="hdp_2_5_0_0_add_spark2_yarn_shuffle"/>
       </execute-stage>
@@ -310,6 +322,12 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hbase_audit_db"/>
       </execute-stage>
 
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Parameterizing HBase Log4J Properties">
+        <task xsi:type="configure" id="hbase_log4j_parameterize">
+          <summary>Updating the Hbase Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <execute-stage service="HBASE" component="HBASE_MASTER" title="Update HBase Configuration">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.HBaseEnvMaxDirectMemorySizeAction">
           <summary>Update HBase Env Configuration</summary>
@@ -374,15 +392,34 @@
         <task xsi:type="configure" id="hdp_2_4_0_0_oozie_remove_service_classes" />
       </execute-stage>
 
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Parameterizing Oozie Log4J Properties">
+        <task xsi:type="configure" id="oozie_log4j_parameterize">
+          <summary>Updating the Oozie Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!--FALCON-->
       <execute-stage service="FALCON" component="FALCON_SERVER" title="Apply config changes for Falcon">
         <task xsi:type="configure" id="hdp_2_5_0_0_falcon_server_adjust_services_property"/>
       </execute-stage>
 
+      <execute-stage service="FALCON" component="FALCON_SERVER" title="Parameterizing Falcon Log4J Properties">
+        <task xsi:type="configure" id="falcon_log4j_parameterize">
+          <summary>Updating the Falcon Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--KAFKA-->
       <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Apply config changes for Kafka Broker">
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kafka_audit_db"/>
       </execute-stage>
 
+      <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Parameterizing Kafka Log4J Properties">
+        <task xsi:type="configure" id="kafka_log4j_parameterize">
+          <summary>Updating the Kafka Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!-- SPARK -->
       <execute-stage service="SPARK" component="SPARK_JOBHISTORYSERVER" title="Apply config changes for Spark JobHistoryServer">
         <task xsi:type="configure" id="hdp_2_4_0_0_spark_jobhistoryserver"/>
@@ -424,6 +461,12 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_set_external_solrCloud_flag"/>
       </execute-stage>
 
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Parameterizing Ranger Admin Log4J Properties">
+        <task xsi:type="configure" id="admin_log4j_parameterize">
+          <summary>Updating the Ranger admin Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <execute-stage service="RANGER" component="RANGER_ADMIN" title="Calculating Ranger Properties">
         <condition xsi:type="security" type="kerberos"/>
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.RangerKerberosConfigCalculation">
@@ -437,6 +480,17 @@
         </task>
       </execute-stage>
 
+      <execute-stage service="RANGER" component="RANGER_USERSYNC" title="Parameterizing Ranger Usersync Log4J Properties">
+        <task xsi:type="configure" id="usersync_log4j_parameterize">
+          <summary>Updating the Ranger usersync Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+      <execute-stage service="RANGER" component="RANGER_TAGSYNC" title="Parameterizing Ranger Tagsync Log4J Properties">
+        <task xsi:type="configure" id="tagsync_log4j_parameterize">
+          <summary>Updating the Ranger tagsync Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!-- RANGER KMS -->
       <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Apply config changes for Ranger KMS Server">
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kms_audit_db"/>
@@ -449,11 +503,28 @@
         </task>
       </execute-stage>
 
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Parameterizing Ranger Kms Log4J Properties">
+        <task xsi:type="configure" id="kms_log4j_parameterize">
+          <summary>Updating the KMS Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!-- KNOX -->
       <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Apply config changes for Knox Gateway">
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_knox_audit_db"/>
       </execute-stage>
 
+      <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Parameterizing Knox Gateway Log4J Properties">
+        <task xsi:type="configure" id="knox_gateway_log4j_parameterize">
+          <summary>Updating the Knox Gateway Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+      <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Parameterizing Knox Ldap Log4J Properties">
+        <task xsi:type="configure" id="knox_ldap_log4j_parameterize">
+          <summary>Updating the Knox Ldap Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!-- STORM -->
       <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm Nimbus">
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_storm_audit_db"/>
@@ -480,7 +551,22 @@
       <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus">
         <task xsi:type="configure" id="increase_storm_zookeeper_timeouts"/>
       </execute-stage>
-    </group>
+
+      <!--ATLAS-->
+      <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Parameterizing Atlas Log4J Properties">
+        <task xsi:type="configure" id="atlas_log4j_parameterize">
+          <summary>Updating the Atlas Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--ZOOKEEPER-->
+      <execute-stage service="ZOOKEEPER" component="ZOOKEEPER_SERVER" title="Parameterizing Zookeeper Log4J Properties">
+        <task xsi:type="configure" id="zookeeper_log4j_parameterize">
+          <summary>Updating the Zookeeper Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+    </group>
 
     <!--
     After processing this group, the user-specified Kerberos descriptor will be updated to work with

http://git-wip-us.apache.org/repos/asf/ambari/blob/4dac2783/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
index 549e5a4..2c2049b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml
@@ -280,6 +280,12 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hdfs_audit_db"/>
       </execute-stage>
 
+      <execute-stage service="HDFS" component="NAMENODE" title="Parameterizing Hdfs Log4J Properties">
+        <task xsi:type="configure" id="hdfs_log4j_parameterize">
+          <summary>Updating the Hdfs Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!-- YARN -->
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Calculating Yarn Properties for Spark">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.SparkShufflePropertyConfig">
@@ -295,6 +301,12 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_add_spark2_yarn_shuffle"/>
       </execute-stage>
 
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Parameterizing Yarn Log4J Properties Resource Manager">
+        <task xsi:type="configure" id="yarn_log4j_parameterize">
+          <summary>Updating the Yarn Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Apply config changes for Mapreduce2 client">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath">
           <summary>Verifying LZO codec path for mapreduce</summary>
@@ -311,6 +323,13 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hbase_audit_db"/>
       </execute-stage>
 
+      <!--HBASE-->
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Parameterizing HBase Log4J Properties">
+        <task xsi:type="configure" id="hbase_log4j_parameterize">
+          <summary>Updating the Hbase Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <execute-stage service="HBASE" component="HBASE_MASTER" title="Update HBase Configuration">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.HBaseEnvMaxDirectMemorySizeAction">
           <summary>Update HBase Env Configuration</summary>
@@ -375,15 +394,34 @@
         <task xsi:type="configure" id="hdp_2_4_0_0_oozie_remove_service_classes" />
       </execute-stage>
 
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Parameterizing Oozie Log4J Properties">
+        <task xsi:type="configure" id="oozie_log4j_parameterize">
+          <summary>Updating the Oozie Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!--FALCON-->
       <execute-stage service="FALCON" component="FALCON_SERVER" title="Apply config changes for Falcon">
         <task xsi:type="configure" id="hdp_2_5_0_0_falcon_server_adjust_services_property"/>
       </execute-stage>
 
+      <execute-stage service="FALCON" component="FALCON_SERVER" title="Parameterizing Falcon Log4J Properties">
+        <task xsi:type="configure" id="falcon_log4j_parameterize">
+          <summary>Updating the Falcon Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--KAFKA-->
       <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Apply config changes for Kafka Broker">
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kafka_audit_db"/>
       </execute-stage>
 
+      <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Parameterizing Kafka Log4J Properties">
+        <task xsi:type="configure" id="kafka_log4j_parameterize">
+          <summary>Updating the Kafka Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!-- SPARK -->
       <execute-stage service="SPARK" component="SPARK_JOBHISTORYSERVER" title="Apply config changes for Spark JobHistoryServer">
         <task xsi:type="configure" id="hdp_2_4_0_0_spark_jobhistoryserver"/>
@@ -425,6 +463,12 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_set_external_solrCloud_flag"/>
       </execute-stage>
 
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Parameterizing Ranger Admin Log4J Properties">
+        <task xsi:type="configure" id="admin_log4j_parameterize">
+          <summary>Updating the Ranger admin Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <execute-stage service="RANGER" component="RANGER_ADMIN" title="Calculating Ranger Properties">
         <condition xsi:type="security" type="kerberos"/>
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.RangerKerberosConfigCalculation">
@@ -442,11 +486,29 @@
         <task xsi:type="configure" id="hdp_2_6_0_0_remove_bind_anonymous"/>
       </execute-stage>
 
+       <execute-stage service="RANGER" component="RANGER_USERSYNC" title="Parameterizing Ranger Usersync Log4J Properties">
+        <task xsi:type="configure" id="usersync_log4j_parameterize">
+          <summary>Updating the Ranger usersync Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_TAGSYNC" title="Parameterizing Ranger Tagsync Log4J Properties">
+        <task xsi:type="configure" id="tagsync_log4j_parameterize">
+          <summary>Updating the Ranger tagsync Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!-- RANGER KMS -->
       <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Apply config changes for Ranger KMS Server">
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kms_audit_db"/>
       </execute-stage>
 
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Apply config changes for Ranger kms">
+        <task xsi:type="configure" id="kms_log4j_parameterize">
+          <summary>Updating the KMS Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Calculating Ranger Properties">
         <condition xsi:type="security" type="kerberos"/>
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.RangerKmsProxyConfig">
@@ -459,6 +521,18 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_knox_audit_db"/>
       </execute-stage>
 
+      <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Parameterizing Knox Gateway Log4J Properties">
+        <task xsi:type="configure" id="knox_gateway_log4j_parameterize">
+          <summary>Updating the Knox Gateway Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Parameterizing Knox Ldap Log4J Properties">
+        <task xsi:type="configure" id="knox_ldap_log4j_parameterize">
+          <summary>Updating the Knox Ldap Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!-- STORM -->
       <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm Nimbus">
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_storm_audit_db"/>
@@ -485,7 +559,22 @@
       <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus">
         <task xsi:type="configure" id="increase_storm_zookeeper_timeouts"/>
       </execute-stage>
-    </group>
+
+      <!--ATLAS-->
+      <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Parameterizing Atlas Log4J Properties">
+        <task xsi:type="configure" id="atlas_log4j_parameterize">
+          <summary>Updating the Atlas Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--ZOOKEEPER-->
+      <execute-stage service="ZOOKEEPER" component="ZOOKEEPER_SERVER" title="Parameterizing Zookeeper Log4J Properties">
+        <task xsi:type="configure" id="zookeeper_log4j_parameterize">
+          <summary>Updating the Zookeeper Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+    </group>
 
     <!--
     After processing this group, the user-specified Kerberos descriptor will be updated to work with

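Note the role of <condition> in these packs: a stage such as "Calculating Ranger Properties" is gated on cluster security and only runs when Kerberos is enabled, while the new Log4J parameterization stages carry no condition and apply unconditionally. A minimal sketch of a gated stage, using the condition and server-action class from the diff above (task body elided):

  <execute-stage service="RANGER" component="RANGER_ADMIN" title="Calculating Ranger Properties">
    <condition xsi:type="security" type="kerberos"/>
    <task xsi:type="server_action"
          class="org.apache.ambari.server.serveraction.upgrades.RangerKerberosConfigCalculation"/>
  </execute-stage>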
http://git-wip-us.apache.org/repos/asf/ambari/blob/4dac2783/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
index 0a7bcea..a37d171 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
@@ -470,6 +470,10 @@
   <processing>
     <service name="ZOOKEEPER">
       <component name="ZOOKEEPER_SERVER">
+        <pre-upgrade>
+          <task xsi:type="configure" id="zookeeper_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade />
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
@@ -509,6 +513,9 @@
             <script>scripts/ranger_admin.py</script>
             <function>setup_ranger_java_patches</function>
           </task>
+
+          <task xsi:type="configure" id="admin_log4j_parameterize" />
+
         </pre-upgrade>
         
         <pre-downgrade copy-upgrade="true" />
@@ -520,6 +527,20 @@
       </component>
 
       <component name="RANGER_USERSYNC">
+        <pre-upgrade>
+          <task xsi:type="configure" id="usersync_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade/>
+        <upgrade>
+          <task xsi:type="restart-task" />
+        </upgrade>
+      </component>
+
+      <component name="RANGER_TAGSYNC">
+        <pre-upgrade>
+          <task xsi:type="configure" id="tagsync_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade />
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
@@ -529,6 +550,7 @@
     <service name="RANGER_KMS">
       <component name="RANGER_KMS_SERVER">
         <pre-upgrade>
+          <task xsi:type="configure" id="kms_log4j_parameterize" />
           <task xsi:type="execute" hosts="any" sequential="true">
             <summary>Upgrading Ranger KMS database schema</summary>
             <script>scripts/kms_server.py</script>
@@ -555,6 +577,7 @@
 
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_4_0_0_namenode_ha_adjustments"/>
+          <task xsi:type="configure" id="hdfs_log4j_parameterize" />
         </pre-upgrade>
         
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
@@ -630,6 +653,7 @@
 
       <component name="RESOURCEMANAGER">
         <pre-upgrade>
+          <task xsi:type="configure" id="yarn_log4j_parameterize" />
           <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.SparkShufflePropertyConfig">
             <summary>Calculating Yarn Properties for Spark Shuffle</summary>
           </task>
@@ -659,6 +683,7 @@
         <pre-upgrade>
           <!-- These HBASE configs changed in HDP 2.3.4.0, but Ambari can't distinguish HDP 2.3.2.0 vs HDP 2.3.4.0, so easier to always do them. -->
           <task xsi:type="configure" id="hdp_2_4_0_0_hbase_remove_local_indexing"/>
+          <task xsi:type="configure" id="hbase_log4j_parameterize" />
         </pre-upgrade>
 
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
@@ -814,7 +839,7 @@
       <component name="OOZIE_SERVER">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_4_0_0_oozie_remove_service_classes" />
-
+          <task xsi:type="configure" id="oozie_log4j_parameterize" />
           <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.OozieConfigCalculation">
             <summary>Adjusting Oozie properties</summary>
           </task>
@@ -863,6 +888,10 @@
 
     <service name="FALCON">
       <component name="FALCON_SERVER">
+        <pre-upgrade>
+          <task xsi:type="configure" id="falcon_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade/>
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
@@ -876,6 +905,10 @@
 
     <service name="KAFKA">
       <component name="KAFKA_BROKER">
+        <pre-upgrade>
+          <task xsi:type="configure" id="kafka_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade/>
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
@@ -884,6 +917,10 @@
 
     <service name="KNOX">
       <component name="KNOX_GATEWAY">
+        <pre-upgrade>
+          <task xsi:type="configure" id="knox_gateway_log4j_parameterize" />
+          <task xsi:type="configure" id="knox_ldap_log4j_parameterize" />
+        </pre-upgrade>
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
 
         <upgrade>
@@ -963,5 +1000,18 @@
         </upgrade>
       </component>
     </service>
+
+    <service name="ATLAS">
+      <component name="ATLAS_SERVER">
+        <pre-upgrade>
+          <task xsi:type="configure" id="atlas_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade />
+        <upgrade>
+          <task xsi:type="restart-task" />
+        </upgrade>
+      </component>
+    </service>
+
   </processing>
 </upgrade>

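In the rolling pack above, placement matters: each *_log4j_parameterize task sits in <pre-upgrade>, so the parameterized defaults are written before the component's <upgrade> restart picks them up, and every new <pre-upgrade> is paired with an explicit, empty <pre-downgrade/> (per the file's own "no-op to prevent config changes on downgrade" comments) so the rewrite is not re-applied when moving back. The resulting shape, taken from the KAFKA_BROKER entry:

  <component name="KAFKA_BROKER">
    <pre-upgrade>
      <task xsi:type="configure" id="kafka_log4j_parameterize" />
    </pre-upgrade>
    <pre-downgrade/>  <!-- no-op: keep downgrade from re-applying config changes -->
    <upgrade>
      <task xsi:type="restart-task" />
    </upgrade>
  </component>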
http://git-wip-us.apache.org/repos/asf/ambari/blob/4dac2783/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
index 7827533..47f58bd 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.5.xml
@@ -558,6 +558,10 @@
   <processing>
     <service name="ZOOKEEPER">
       <component name="ZOOKEEPER_SERVER">
+        <pre-upgrade>
+          <task xsi:type="configure" id="zookeeper_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade/>
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
@@ -573,6 +577,7 @@
     <service name="RANGER">
       <component name="RANGER_ADMIN">
         <pre-upgrade>
+          <task xsi:type="configure" id="admin_log4j_parameterize" />
           <task xsi:type="execute" hosts="all">
             <summary>Stop Ranger Admin</summary>
             <script>scripts/ranger_admin.py</script>
@@ -621,6 +626,20 @@
       </component>
 
       <component name="RANGER_USERSYNC">
+        <pre-upgrade>
+          <task xsi:type="configure" id="usersync_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade/>
+        <upgrade>
+          <task xsi:type="restart-task" />
+        </upgrade>
+      </component>
+
+      <component name="RANGER_TAGSYNC">
+        <pre-upgrade>
+          <task xsi:type="configure" id="tagsync_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade />
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
@@ -631,7 +650,7 @@
       <component name="RANGER_KMS_SERVER">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kms_audit_db" />
-
+          <task xsi:type="configure" id="kms_log4j_parameterize" />
           <task xsi:type="execute" hosts="any" sequential="true">
             <summary>Upgrading Ranger KMS database schema</summary>
             <script>scripts/kms_server.py</script>
@@ -658,6 +677,7 @@
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_4_0_0_namenode_ha_adjustments"/>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hdfs_audit_db" />
+          <task xsi:type="configure" id="hdfs_log4j_parameterize" />
         </pre-upgrade>
 
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
@@ -733,6 +753,7 @@
 
       <component name="RESOURCEMANAGER">
         <pre-upgrade>
+          <task xsi:type="configure" id="yarn_log4j_parameterize" />
           <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.SparkShufflePropertyConfig">
             <summary>Calculating Yarn Properties for Spark Shuffle</summary>
           </task>
@@ -770,6 +791,7 @@
           <!-- These HBASE configs changed in HDP 2.3.4.0, but Ambari can't distinguish HDP 2.3.2.0 vs HDP 2.3.4.0, so easier to always do them. -->
           <task xsi:type="configure" id="hdp_2_4_0_0_hbase_remove_local_indexing"/>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hbase_audit_db" />
+          <task xsi:type="configure" id="hbase_log4j_parameterize" />
         </pre-upgrade>
 
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
@@ -944,7 +966,7 @@
       <component name="OOZIE_SERVER">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_4_0_0_oozie_remove_service_classes" />
-
+          <task xsi:type="configure" id="oozie_log4j_parameterize" />
           <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.OozieConfigCalculation">
             <summary>Adjusting Oozie properties</summary>
           </task>
@@ -998,6 +1020,7 @@
       <component name="FALCON_SERVER">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_falcon_server_adjust_services_property"/>
+          <task xsi:type="configure" id="falcon_log4j_parameterize" />
         </pre-upgrade>
         <pre-downgrade/>
         <upgrade>
@@ -1016,6 +1039,7 @@
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kafka_audit_db" />
           <task xsi:type="configure" id="hdp_2_5_0_0_add_protocol_compatibility" />
+          <task xsi:type="configure" id="kafka_log4j_parameterize" />
         </pre-upgrade>
         
         <pre-downgrade/>
@@ -1030,6 +1054,8 @@
       <component name="KNOX_GATEWAY">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_knox_audit_db" />
+          <task xsi:type="configure" id="knox_gateway_log4j_parameterize" />
+          <task xsi:type="configure" id="knox_ldap_log4j_parameterize" />
         </pre-upgrade>
         
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
@@ -1190,5 +1216,18 @@
         </upgrade>
       </component>
     </service>
+
+    <service name="ATLAS">
+    <component name="ATLAS_SERVER">
+      <pre-upgrade>
+        <task xsi:type="configure" id="atlas_log4j_parameterize" />
+      </pre-upgrade>
+      <pre-downgrade />
+      <upgrade>
+        <task xsi:type="restart-task" />
+      </upgrade>
+    </component>
+    </service>
+
   </processing>
 </upgrade>

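One more point on reuse: the definition ids are shared between the express and rolling paths, so a single entry in config-upgrade.xml backs both. For example, kafka_log4j_parameterize is invoked from an <execute-stage> in nonrolling-upgrade-2.5.xml and again from KAFKA_BROKER's <pre-upgrade> in this file; the two call sites differ only in how the task is scheduled:

  <!-- express (nonrolling) pack -->
  <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Parameterizing Kafka Log4J Properties">
    <task xsi:type="configure" id="kafka_log4j_parameterize"/>
  </execute-stage>

  <!-- rolling pack, inside <component name="KAFKA_BROKER"> -->
  <pre-upgrade>
    <task xsi:type="configure" id="kafka_log4j_parameterize" />
  </pre-upgrade>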
http://git-wip-us.apache.org/repos/asf/ambari/blob/4dac2783/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
index fa09448..4451306 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/config-upgrade.xml
@@ -139,6 +139,37 @@
             <transfer operation="delete" delete-key="bind_anonymous" />
           </definition>
 
+          <definition xsi:type="configure" id="admin_log4j_parameterize" summary="Parameterizing Ranger Log4J Properties">
+            <type>admin-log4j</type>
+            <set key="ranger_xa_log_maxfilesize" value="256"/>
+            <set key="ranger_xa_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.xa_log_appender.MaxFileSize={{ranger_xa_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.xa_log_appender.MaxBackupIndex={{ranger_xa_log_maxbackupindex}}"/>
+          </definition>
+
+        </changes>
+      </component>
+
+      <component name="RANGER_USERSYNC">
+        <changes>
+          <definition xsi:type="configure" id="usersync_log4j_parameterize" summary="Parameterizing Ranger Usersync Log4J Properties">
+            <type>usersync-log4j</type>
+            <set key="ranger_usersync_log_maxfilesize" value="256"/>
+            <set key="ranger_usersync_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxFileSize = {{ranger_usersync_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxBackupIndex = {{ranger_usersync_log_maxbackupindex}}"/>
+          </definition>
+        </changes>
+      </component>
+      <component name="RANGER_TAGSYNC">
+        <changes>
+          <definition xsi:type="configure" id="tagsync_log4j_parameterize" summary="Parameterizing Ranger Tagsync Log4J Properties">
+            <type>tagsync-log4j</type>
+            <set key="ranger_tagsync_log_maxfilesize" value="256"/>
+            <set key="ranger_tagsync_log_number_of_backup_files" value="20"/>
+            <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxFileSize = {{ranger_tagsync_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxBackupIndex = {{ranger_tagsync_log_number_of_backup_files}}"/>
+          </definition>
         </changes>
       </component>
     </service>
@@ -156,6 +187,17 @@
             <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
             <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
           </definition>
+          <definition xsi:type="configure" id="kms_log4j_parameterize" summary="Parameterizing Ranger KMS Log4J Properties">
+            <type>kms-log4j</type>
+            <set key="ranger_kms_log_maxfilesize" value="256"/>
+            <set key="ranger_kms_log_maxbackupindex" value="20"/>
+            <set key="ranger_kms_audit_log_maxfilesize" value="256"/>
+            <set key="ranger_kms_audit_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms.MaxFileSize = {{ranger_kms_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms.MaxBackupIndex = {{ranger_kms_log_maxbackupindex}}"/>
+            <replace key="content" find="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms-audit.MaxFileSize = {{ranger_kms_audit_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms-audit.MaxBackupIndex = {{ranger_kms_audit_log_maxbackupindex}}"/>
+          </definition>
         </changes>
       </component>
     </service>
@@ -179,6 +221,18 @@
             <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
             <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
           </definition>
+          <!-- HDFS Rolling properties for log4j need to be parameterized. -->
+          <definition xsi:type="configure" id="hdfs_log4j_parameterize" summary="Parameterizing Hdfs Log4J Properties">
+            <type>hdfs-log4j</type>
+            <set key="hadoop_log_max_backup_size" value="256"/>
+            <set key="hadoop_log_number_of_backup_files" value="10"/>
+            <set key="hadoop_security_log_max_backup_size" value="256"/>
+            <set key="hadoop_security_log_number_of_backup_files" value="20"/>
+            <regex-replace key="content" find="log4j.appender.RFA.MaxFileSize=([0-9]+)MB" replace-with="log4j.appender.RFA.MaxFileSize={{hadoop_log_max_backup_size}}MB"/>
+            <regex-replace key="content" find="log4j.appender.RFA.MaxBackupIndex=([0-9]+)" replace-with="log4j.appender.RFA.MaxBackupIndex={{hadoop_log_number_of_backup_files}}"/>
+            <regex-replace key="content" find="hadoop.security.log.maxfilesize=([0-9]+)MB" replace-with="hadoop.security.log.maxfilesize={{hadoop_security_log_max_backup_size}}MB"/>
+            <regex-replace key="content" find="hadoop.security.log.maxbackupindex=([0-9]+)" replace-with="hadoop.security.log.maxbackupindex={{hadoop_security_log_number_of_backup_files}}"/>
+          </definition>
         </changes>
       </component>
     </service>
@@ -196,6 +250,14 @@
             <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
             <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
           </definition>
+          <!-- Yarn Rolling properties for log4j need to be parameterized. -->
+          <definition xsi:type="configure" id="yarn_log4j_parameterize" summary="Parameterizing Yarn Log4J Properties">
+            <type>yarn-log4j</type>
+            <set key="yarn_rm_summary_log_max_backup_size" value="256"/>
+            <set key="yarn_rm_summary_log_number_of_backup_files" value="20"/>
+            <regex-replace key="content" find="^log4j.appender.RMSUMMARY.MaxFileSize=([0-9]+)MB" replace-with="log4j.appender.RMSUMMARY.MaxFileSize={{yarn_rm_summary_log_max_backup_size}}MB"/>
+            <regex-replace key="content" find="^log4j.appender.RMSUMMARY.MaxBackupIndex=([0-9]+)" replace-with="log4j.appender.RMSUMMARY.MaxBackupIndex={{yarn_rm_summary_log_number_of_backup_files}}"/>
+          </definition>
         </changes>
       </component>
 
@@ -228,6 +290,17 @@
             <set key="inter.broker.protocol.version" value="0.9.0.0" />
             <set key="log.message.format.version" value="0.9.0.0" />
           </definition>
+          <definition xsi:type="configure" id="kafka_log4j_parameterize" summary="Parameterizing Kafka Log4J Properties">
+            <type>kafka-log4j</type>
+            <set key="kafka_log_maxfilesize" value="256"/>
+            <set key="kafka_log_maxbackupindex" value="20"/>
+            <set key="controller_log_maxfilesize" value="256"/>
+            <set key="controller_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kafkaAppender.MaxFileSize = {{kafka_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kafkaAppender.MaxBackupIndex = {{kafka_log_maxbackupindex}}"/>
+            <replace key="content" find="log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.controllerAppender.MaxFileSize = {{controller_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.controllerAppender.MaxBackupIndex = {{controller_log_maxbackupindex}}"/>
+          </definition>
         </changes>
       </component>
     </service>
@@ -325,6 +398,18 @@
             <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
             <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
           </definition>
+          <!-- HBase Rolling properties for log4j need to be parameterized. -->
+          <definition xsi:type="configure" id="hbase_log4j_parameterize" summary="Parameterizing HBase Log4J Properties">
+            <type>hbase-log4j</type>
+            <set key="hbase_log_maxfilesize" value="256"/>
+            <set key="hbase_log_maxbackupindex" value="20"/>
+            <set key="hbase_security_log_maxfilesize" value="256"/>
+            <set key="hbase_security_log_maxbackupindex" value="20"/>
+            <regex-replace key="content" find="hbase.log.maxfilesize=([0-9]+)MB" replace-with="hbase.log.maxfilesize={{hbase_log_maxfilesize}}MB"/>
+            <regex-replace key="content" find="hbase.log.maxbackupindex=([0-9]+)" replace-with="hbase.log.maxbackupindex={{hbase_log_maxbackupindex}}"/>
+            <regex-replace key="content" find="hbase.security.log.maxfilesize=([0-9]+)MB" replace-with="hbase.security.log.maxfilesize={{hbase_security_log_maxfilesize}}MB"/>
+            <regex-replace key="content" find="hbase.security.log.maxbackupindex=([0-9]+)" replace-with="hbase.security.log.maxbackupindex={{hbase_security_log_maxbackupindex}}"/>
+          </definition>
         </changes>
       </component>
     </service>
@@ -342,6 +427,20 @@
             <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
             <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
           </definition>
+          <definition xsi:type="configure" id="knox_gateway_log4j_parameterize" summary="Parameterizing Knox Gateway Log4J Properties">
+            <type>gateway-log4j</type>
+            <set key="knox_gateway_log_maxfilesize" value="256"/>
+            <set key="knox_gateway_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxFileSize = {{knox_gateway_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxBackupIndex = {{knox_gateway_log_maxbackupindex}}"/>
+          </definition>
+          <definition xsi:type="configure" id="knox_ldap_log4j_parameterize" summary="Parameterizing Knox Ldap Log4J Properties">
+            <type>ldap-log4j</type>
+            <set key="knox_ldap_log_maxfilesize" value="256"/>
+            <set key="knox_ldap_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxFileSize = {{knox_ldap_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxBackupIndex = {{knox_ldap_log_maxbackupindex}}"/>
+          </definition>
         </changes>
       </component>
     </service>
@@ -353,8 +452,81 @@
             <type>falcon-startup.properties</type>
             <set key="*.application.services" value="org.apache.falcon.security.AuthenticationInitializationService, org.apache.falcon.workflow.WorkflowJobEndNotificationService, org.apache.falcon.service.ProcessSubscriberService, org.apache.falcon.extensions.ExtensionService, org.apache.falcon.service.LifecyclePolicyMap, org.apache.falcon.entity.store.ConfigurationStore, org.apache.falcon.rerun.service.RetryService, org.apache.falcon.rerun.service.LateRunService, org.apache.falcon.service.LogCleanupService, org.apache.falcon.metadata.MetadataMappingService{{atlas_application_class_addition}}"/>
           </definition>
+          <definition xsi:type="configure" id="falcon_log4j_parameterize" summary="Parameterizing Falcon Log4J Properties">
+            <type>falcon-log4j</type>
+            <set key="falcon_log_maxfilesize" value="256"/>
+            <set key="falcon_log_maxbackupindex" value="20"/>
+            <set key="falcon_security_log_maxfilesize" value="256"/>
+            <set key="falcon_security_log_maxbackupindex" value="20"/>
+            <replace key="content" find="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;&#xA;&lt;param name=&quot;MaxFileSize&quot; value=&quot;{{falcon_log_maxfilesize}}MB&quot; /&gt;"/>
+            <replace key="content" find="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;&#xA;&lt;param name=&quot;MaxBackupIndex&quot; value=&quot;{{falcon_log_maxbackupindex}}&quot; /&gt;"/>
+            <replace key="content" find="&lt;appender name=&quot;SECURITY&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;SECURITY&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;&#xA;&lt;param name=&quot;MaxFileSize&quot; value=&quot;{{falcon_security_log_maxfilesize}}MB&quot;/&gt;"/>
+            <replace key="content" find="&lt;appender name=&quot;SECURITY&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;SECURITY&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;&#xA;&lt;param name=&quot;MaxBackupIndex&quot; value=&quot;{{falcon_security_log_maxbackupindex}}&quot;/&gt;"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="SPARK">
+      <component name="SPARK_JOBHISTORYSERVER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_5_0_0_spark_jobhistoryserver">
+            <type>spark-defaults</type>
+            <transfer operation="delete" delete-key="spark.yarn.max.executor.failures" />
+          </definition>
+        </changes>
+      </component>
+      <component name="SPARK_CLIENT">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_5_0_0_spark_client">
+            <type>spark-defaults</type>
+            <transfer operation="delete" delete-key="spark.yarn.max.executor.failures" />
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="OOZIE">
+      <component name="OOZIE_SERVER">
+        <changes>
+          <!-- Oozie Rolling properties for log4j need to be parameterized. -->
+          <definition xsi:type="configure" id="oozie_log4j_parameterize" summary="Parameterizing Oozie Log4J Properties">
+            <type>oozie-log4j</type>
+            <set key="oozie_log_maxhistory" value="720"/>
+            <regex-replace key="content" find="^log4j.appender.oozie.RollingPolicy.MaxHistory=([0-9]+)" replace-with="log4j.appender.oozie.RollingPolicy.MaxHistory={{oozie_log_maxhistory}}"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="ZOOKEEPER">
+      <component name="ZOOKEEPER_SERVER">
+        <changes>
+          <!-- Zookeeper Rolling properties for log4j need to be parameterized. -->
+          <definition xsi:type="configure" id="zookeeper_log4j_parameterize" summary="Parameterizing ZooKeeper Log4J Properties">
+            <type>zookeeper-log4j</type>
+            <set key="zookeeper_log_max_backup_size" value="10"/>
+            <set key="zookeeper_log_number_of_backup_files" value="10"/>
+            <regex-replace key="content" find="^log4j.appender.ROLLINGFILE.MaxFileSize=([0-9]+)MB" replace-with="log4j.appender.ROLLINGFILE.MaxFileSize={{zookeeper_log_max_backup_size}}MB"/>
+            <regex-replace key="content" find="^#log4j.appender.ROLLINGFILE.MaxBackupIndex=([0-9]+)" replace-with="#log4j.appender.ROLLINGFILE.MaxBackupIndex={{zookeeper_log_number_of_backup_files}}"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="ATLAS">
+      <component name="ATLAS_SERVER">
+        <changes>
+          <definition xsi:type="configure" id="atlas_log4j_parameterize" summary="Parameterizing Atlas Log4J Properties">
+            <type>atlas-log4j</type>
+            <set key="atlas_log_max_backup_size" value="256"/>
+            <set key="atlas_log_number_of_backup_files" value="20"/>
+            <replace key="content" find="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;\n&lt;param name=&quot;MaxFileSize&quot; value=&quot;{{atlas_log_max_backup_size}}MB&quot; /&gt;"/>
+            <replace key="content" find="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;\n&lt;param name=&quot;MaxFileSize&quot; value=&quot;{{atlas_log_number_of_backup_files}}&quot; /&gt;"/>
+          </definition>
         </changes>
       </component>
     </service>
-  </services>
+
+  </services>
 </upgrade-config-changes>

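A note on the two operation types used throughout these definitions. <replace> is a literal substitution: each of the paired replaces re-emits the anchor line and appends one new setting after it, so running both against the same anchor stacks two settings beneath it. <regex-replace> instead rewrites a value that already exists in the content, capturing the old number and substituting a {{...}} token that is resolved from the keys <set> in the same definition. A sketch of the net effect on kafka-log4j content, assuming each replace is a plain in-place string substitution applied in the order listed:

  before:
    log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender

  after:
    log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
    log4j.appender.kafkaAppender.MaxBackupIndex = {{kafka_log_maxbackupindex}}
    log4j.appender.kafkaAppender.MaxFileSize = {{kafka_log_maxfilesize}}MB

(The second replace matches the anchor line again, so its insertion lands directly under the anchor, above the line added first.)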

[47/50] [abbrv] ambari git commit: AMBARI-19602. Input logic tags are missing in the coordinator xml (Padma Priya N via gauravn7)

Posted by nc...@apache.org.
AMBARI-19602. Input logic tags are missing in the coordinator xml (Padma Priya N via gauravn7)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f7155d9c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f7155d9c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f7155d9c

Branch: refs/heads/branch-dev-patch-upgrade
Commit: f7155d9c40362d084bae5e695b0af1fe8fd584c3
Parents: f684c2b
Author: Gaurav Nagar <gr...@gmail.com>
Authored: Wed Jan 18 17:00:01 2017 +0530
Committer: Gaurav Nagar <gr...@gmail.com>
Committed: Wed Jan 18 17:00:01 2017 +0530

----------------------------------------------------------------------
 .../resources/ui/app/components/coord-config.js    | 12 ++++++++++++
 .../main/resources/ui/app/components/job-config.js | 17 ++++++++++++-----
 .../domain/coordinator/coordinator-xml-importer.js | 12 ++++++++++++
 .../ui/app/templates/components/coord-config.hbs   | 11 +++++++----
 .../ui/app/templates/components/job-config.hbs     |  4 +++-
 5 files changed, 46 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f7155d9c/contrib/views/wfmanager/src/main/resources/ui/app/components/coord-config.js
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/coord-config.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/coord-config.js
index eb0d585..26b94cf 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/coord-config.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/coord-config.js
@@ -298,6 +298,10 @@ export default Ember.Component.extend(Validations, Ember.Evented, {
     if(coordinator.get('dataInputType') === 'logical'){
       this.set('conditionalDataInExists', true);
     }
+    if(coordinator.get('inputLogic')){
+      this.set('inputLogicExists', true);
+      this.set('inputLogicEnabled', true);
+    }
   },
   validateChildComponents(){
     var isChildComponentsValid = true;
@@ -403,7 +407,15 @@ export default Ember.Component.extend(Validations, Ember.Evented, {
       this.set('dataOutputEditMode', false);
       this.set('dataOutputCreateMode', false);
     },
+    dryrunCoordinator(){
+      this.set('dryrun', true);
+      this.send('submit');
+    },
     submitCoordinator(){
+      this.set('dryrun', false);
+      this.send('submit');
+    },
+    submit(){
       var isChildComponentsValid = this.validateChildComponents();
       if(this.get('validations.isInvalid') || !isChildComponentsValid) {
         this.set('showErrorMessage', true);

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7155d9c/contrib/views/wfmanager/src/main/resources/ui/app/components/job-config.js
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/job-config.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/job-config.js
index 0979c25..beb15f6 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/job-config.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/job-config.js
@@ -207,11 +207,18 @@ export default Ember.Component.extend(Validations, {
       data: this.get("jobXml"),
       success: function(response) {
         var result=JSON.parse(response);
-        this.showNotification({
-          "type": "success",
-          "message": this.get('displayName') +" saved.",
-          "details": "Job id :"+result.id
-        });
+        if(this.get('isDryrun')){
+          this.showNotification({
+            "type": "success",
+            "message": `${this.get('displayName')} is valid.`
+          });
+        }else{
+          this.showNotification({
+            "type": "success",
+            "message": this.get('displayName') +" saved.",
+            "details": "Job id :"+result.id
+          });
+        }
         this.set("savingInProgress",false);
       }.bind(this),
       error: function(response) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7155d9c/contrib/views/wfmanager/src/main/resources/ui/app/domain/coordinator/coordinator-xml-importer.js
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/domain/coordinator/coordinator-xml-importer.js b/contrib/views/wfmanager/src/main/resources/ui/app/domain/coordinator/coordinator-xml-importer.js
index 9b54495..ddb2b43 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/domain/coordinator/coordinator-xml-importer.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/domain/coordinator/coordinator-xml-importer.js
@@ -95,6 +95,9 @@ var CoordinatorXmlImporter= Ember.Object.extend({
       coordinator.supportsConditionalDataInput = true;
       this.extractLogicalInputEvents(coordinatorApp, coordinator);
     }
+    if(coordinatorApp['input-logic']){
+      this.extractInputLogic(coordinatorApp, coordinator);
+    }
     this.extractOutputEvents(coordinatorApp, coordinator);
     this.extractAction(coordinatorApp, coordinator);
     this.extractParameters(coordinatorApp, coordinator);
@@ -187,6 +190,15 @@ var CoordinatorXmlImporter= Ember.Object.extend({
       this.parseConditionTree(conditionJson[key], condition);
     }, this);
   },
+  extractInputLogic(coordinatorApp, coordinator){
+    var conditionJson = coordinatorApp['input-logic'];
+    var condition = {};
+    coordinator.inputLogic = condition;
+    Object.keys(conditionJson).forEach((key)=>{
+      condition.operator = key;
+      this.parseConditionTree(conditionJson[key], condition);
+    }, this);
+  },
   extractDataInputOperand(operandJson){
     var operand = {};
     operand.name = operandJson._name;

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7155d9c/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-config.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-config.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-config.hbs
index f906fd5..dc69382 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-config.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-config.hbs
@@ -35,7 +35,7 @@
             <ul class="dropdown-menu">
               <li>
                 <a  class="pointer" title="Import workflow" {{action "openFileBrowser" "coordinatorFilePath"}}>
-                  <i class="fa fa-download"> Import</i>
+                  <i class="fa fa-download marginright5"></i>Import
                 </a>
              </li>
              <li>
@@ -78,6 +78,9 @@
             <button id="save-coord" type="button" class="btn btn-default" title="Save coordinator in HDFS" {{action "save"}}>
                 <i class="fa fa-floppy-o"></i> Save
             </button>
+            <button type="button" class="btn btn-default" title="Validate Coordinator" {{action "dryrunCoordinator"}}>
+                <i class="fa fa-play marginright5"></i>Validate
+            </button>
             <button type="button" class="btn btn-primary" title="Submit Coordinator" {{action "submitCoordinator"}}>
               <i class="fa fa-upload marginright5"></i>Submit
             </button>
@@ -245,7 +248,7 @@
                   <ul class="list-groups">
                     {{#if conditionalDataInExists}}
                     <li class="list-group-item">
-                      {{#conditional-data-input condition=coordinator.conditionalDataInput datasets=datasetsForInputs isToplevel=true register="registerChild" deregister="deregisterChild"}}
+                      {{#conditional-data-input condition=coordinator.conditionalDataInput datasets=coordinator.dataInputs isToplevel=true register="registerChild" deregister="deregisterChild"}}
                       <span class="pull-right">
                         <i class="fa fa-trash-o" title="Delete" {{action "deleteCondition" index bubbles=false}}></i>
                       </span>
@@ -271,7 +274,7 @@
                       <ul class="list-group">
                         {{#if inputLogicExists}}
                         <li class="list-group-item">
-                          {{#conditional-data-input condition=coordinator.inputLogic datasets=datasetsForInputs isToplevel=true register="registerChild" deregister="deregisterChild"}}
+                          {{#conditional-data-input condition=coordinator.inputLogic datasets=coordinator.dataInputs isToplevel=true register="registerChild" deregister="deregisterChild"}}
                           <span class="pull-right">
                             <i class="fa fa-trash-o" title="Delete" {{action "deleteInputLogic" index bubbles=false}}></i>
                           </span>
@@ -336,7 +339,7 @@
     parameterizedWorkflowPath=parameterizedWorkflowPath
     extractProperties="extractProperties" containsParameteriedPaths=containsParameteriedPaths
     jobFilePath=coordinatorFilePath openFileBrowser="openFileBrowser"
-    closeFileBrowser="closeFileBrowser" jobConfigs=coordinatorConfigs}}
+    closeFileBrowser="closeFileBrowser" jobConfigs=coordinatorConfigs isDryrun=dryrun}}
 {{/if}}
 {{#if showingResetConfirmation}}
 {{#confirmation-dialog title="Confirm Coordinator Reset"
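
Note on the coord-config.hbs hunks above: they (a) add a Validate button wired to a new dryrunCoordinator action and thread an isDryrun=dryrun flag into the job-config component, and (b) rebind the conditional-data-input blocks from datasetsForInputs to coordinator.dataInputs so the condition editor draws from the coordinator's own data inputs. A sketch of how the action side could look on the component follows; everything beyond the names visible in the template (dryrun, dryrunCoordinator, submitCoordinator) is a hypothetical assumption, not the committed implementation.

  // Hypothetical component sketch, assuming a showingJobConfig flag opens the dialog.
  import Ember from 'ember';

  export default Ember.Component.extend({
    dryrun: false,  // consumed by job-config via isDryrun=dryrun in the hunk above
    actions: {
      dryrunCoordinator() {
        this.set('dryrun', true);            // job-config renders its Validate path
        this.set('showingJobConfig', true);  // hypothetical flag that opens the dialog
      },
      submitCoordinator() {
        this.set('dryrun', false);           // normal submit path
        this.set('showingJobConfig', true);
      }
    }
  });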

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7155d9c/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-config.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-config.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-config.hbs
index b33420a..7ffdf5f 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-config.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/job-config.hbs
@@ -174,7 +174,9 @@
           {{#if isDryrun}}
             <button type="button" class="btn btn-primary" {{action "dryrun"}}>Validate</button>
           {{else}}
-            <button type="button" class="btn btn-default" {{action "dryrun"}}>Validate</button>
+            {{#if (not (eq type 'bundle'))}}
+              <button type="button" class="btn btn-default" {{action "dryrun"}}>Validate</button>
+            {{/if}}
             <button type="button" class="btn btn-primary" {{action "save"}}>Submit</button>
           {{/if}}
         {{/if}}
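
Note on the job-config.hbs hunk above: the (not (eq type 'bundle')) sub-expression hides the secondary Validate button when the dialog is opened for a bundle, while the isDryrun branch is left untouched. The same check could instead live in a computed property, as in the sketch below; supportsDryrun is a hypothetical name, and the committed change deliberately keeps the logic inline in the template.

  // Hypothetical alternative: push the type check into the component.
  import Ember from 'ember';

  export default Ember.Component.extend({
    supportsDryrun: Ember.computed('type', function() {
      // Assumption: bundle validation is not wired up in this view.
      return this.get('type') !== 'bundle';
    })
  });

The template would then test {{#if supportsDryrun}} rather than composing the eq/not helpers inline.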


[43/50] [abbrv] ambari git commit: AMBARI-19595. HiveView2.0 not loading in secure environment. (dipayanb)

Posted by nc...@apache.org.
AMBARI-19595. HiveView2.0 not loading in secure environment. (dipayanb)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/15233860
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/15233860
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/15233860

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 15233860e631e8db568e797b3d894c0da465bf72
Parents: bc77c9c
Author: Dipayan Bhowmick <di...@gmail.com>
Authored: Wed Jan 18 13:03:36 2017 +0530
Committer: Dipayan Bhowmick <di...@gmail.com>
Committed: Wed Jan 18 13:04:19 2017 +0530

----------------------------------------------------------------------
 .../src/main/java/org/apache/ambari/view/hive20/AuthParams.java    | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/15233860/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/AuthParams.java
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/AuthParams.java b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/AuthParams.java
index 98e6caf..60df377 100644
--- a/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/AuthParams.java
+++ b/contrib/views/hive20/src/main/java/org/apache/ambari/view/hive20/AuthParams.java
@@ -68,6 +68,8 @@ public class AuthParams {
   public UserGroupInformation getProxyUser() throws IOException {
     UserGroupInformation ugi;
     String proxyuser = null;
+
+    UserGroupInformation.isSecurityEnabled();
     if(context.getCluster() != null) {
       proxyuser = context.getCluster().getConfigurationValue("cluster-env","ambari_principal_name");
     }
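
Note on the AuthParams.java hunk above: UserGroupInformation.isSecurityEnabled() is invoked with its return value discarded, so it is there purely for a side effect. The likely intent (an editorial assumption, not stated in the commit) is that the static call forces UGI to initialize its Hadoop Configuration before the proxy user is created below, which is what lets the view load in a Kerberized cluster.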


[28/50] [abbrv] ambari git commit: Revert "AMBARI-19337. Ambari has some spelling mistakes in YARN proxyuser properties in many places (Jay SenSharma via smohanty)"

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json.orig
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json.orig b/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json.orig
deleted file mode 100644
index 147c1c0..0000000
--- a/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json.orig
+++ /dev/null
@@ -1,3170 +0,0 @@
-
-{
-  "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services?fields=StackServices/*,components/*,components/dependencies/Dependencies/scope,artifacts/Artifacts/artifact_name",
-  "items" : [
-    {
-      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/AMBARI_METRICS",
-      "StackServices" : {
-        "comments" : "A system for metrics collection that provides storage and retrieval capability for metrics collected from the cluster\n      ",
-        "custom_commands" : [ ],
-        "display_name" : "Ambari Metrics",
-        "required_services" : [
-          "ZOOKEEPER"
-        ],
-        "service_check_supported" : true,
-        "service_name" : "AMBARI_METRICS",
-        "service_version" : "0.1.0",
-        "stack_name" : "HDP",
-        "stack_version" : "2.1",
-        "user_name" : null,
-        "config_types" : {
-          "ams-env" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "ams-hbase-env" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "ams-hbase-log4j" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "ams-hbase-policy" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "true"
-            }
-          },
-          "ams-hbase-security-site" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "ams-hbase-site" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "ams-log4j" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "ams-site" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          }
-        },
-        "kerberos_descriptor" : {
-          "components" : [
-            {
-              "identities" : [
-                {
-                  "principal" : {
-                    "configuration" : "ams-hbase-security-site/hbase.master.kerberos.principal",
-                    "type" : "service",
-                    "local_username" : "${ams-env/ambari_metrics_user}",
-                    "value" : "amshbase/_HOST@${realm}"
-                  },
-                  "name" : "ams_hbase_master_hbase",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : "r",
-                      "name" : "${ams-env/ambari_metrics_user}"
-                    },
-                    "file" : "${keytab_dir}/ams-hbase.master.keytab",
-                    "configuration" : "ams-hbase-security-site/hbase.master.keytab.file",
-                    "group" : {
-                      "access" : "",
-                      "name" : "${cluster-env/user_group}"
-                    }
-                  }
-                },
-                {
-                  "principal" : {
-                    "configuration" : "ams-hbase-security-site/hbase.regionserver.kerberos.principal",
-                    "type" : "service",
-                    "local_username" : "${ams-env/ambari_metrics_user}",
-                    "value" : "amshbase/_HOST@${realm}"
-                  },
-                  "name" : "ams_hbase_regionserver_hbase",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : "r",
-                      "name" : "${ams-env/ambari_metrics_user}"
-                    },
-                    "file" : "${keytab_dir}/ams-hbase.regionserver.keytab",
-                    "configuration" : "ams-hbase-security-site/hbase.regionserver.keytab.file",
-                    "group" : {
-                      "access" : "",
-                      "name" : "${cluster-env/user_group}"
-                    }
-                  }
-                },
-                {
-                  "principal" : {
-                    "configuration" : "ams-hbase-security-site/hbase.myclient.principal",
-                    "type" : "service",
-                    "local_username" : "${ams-env/ambari_metrics_user}",
-                    "value" : "amshbase/_HOST@${realm}"
-                  },
-                  "name" : "ams_collector",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : "r",
-                      "name" : "${ams-env/ambari_metrics_user}"
-                    },
-                    "file" : "${keytab_dir}/ams.collector.keytab",
-                    "configuration" : "ams-hbase-security-site/hbase.myclient.keytab",
-                    "group" : {
-                      "access" : "",
-                      "name" : "${cluster-env/user_group}"
-                    }
-                  }
-                },
-                {
-                  "principal" : {
-                    "configuration" : "ams-hbase-security-site/ams.zookeeper.principal",
-                    "type" : "service",
-                    "local_username" : "${ams-env/ambari_metrics_user}",
-                    "value" : "zookeeper/_HOST@${realm}"
-                  },
-                  "name" : "ams_zookeeper",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : "r",
-                      "name" : "${ams-env/ambari_metrics_user}"
-                    },
-                    "file" : "${keytab_dir}/zk.service.ams.keytab",
-                    "configuration" : "ams-hbase-security-site/ams.zookeeper.keytab",
-                    "group" : {
-                      "access" : "",
-                      "name" : "${cluster-env/user_group}"
-                    }
-                  }
-                }
-              ],
-              "configurations" : [
-                {
-                  "ams-hbase-security-site" : {
-                    "hbase.coprocessor.master.classes" : "org.apache.hadoop.hbase.security.access.AccessController",
-                    "hadoop.security.authentication" : "kerberos",
-                    "hbase.security.authentication" : "kerberos",
-                    "hbase.coprocessor.region.classes" : "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.AccessController",
-                    "hbase.security.authorization" : "true",
-                    "hbase.zookeeper.property.kerberos.removeRealmFromPrincipal" : "true",
-                    "hbase.zookeeper.property.jaasLoginRenew" : "3600000",
-                    "hbase.zookeeper.property.authProvider.1" : "org.apache.zookeeper.server.auth.SASLAuthenticationProvider",
-                    "hbase.zookeeper.property.kerberos.removeHostFromPrincipal" : "true"
-                  }
-                },
-                {
-                  "ams-hbase-site": {
-                    "zookeeper.znode.parent": "/ams-hbase-secure"
-                  }
-                }
-              ],
-              "name" : "METRICS_COLLECTOR"
-            }
-          ],
-          "identities" : [
-            {
-              "name" : "/spnego"
-            },
-            {
-              "name" : "/hdfs"
-            }
-          ],
-          "name" : "AMBARI_METRICS"
-        }
-      },
-      "components" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/AMBARI_METRICS/components/METRICS_COLLECTOR",
-          "StackServiceComponents" : {
-            "cardinality" : "1",
-            "component_category" : "MASTER",
-            "component_name" : "METRICS_COLLECTOR",
-            "custom_commands" : [ ],
-            "display_name" : "Metrics Collector",
-            "is_client" : false,
-            "is_master" : true,
-            "service_name" : "AMBARI_METRICS",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/AMBARI_METRICS/components/METRICS_COLLECTOR/dependencies/ZOOKEEPER_SERVER",
-              "Dependencies" : {
-                "component_name" : "ZOOKEEPER_SERVER",
-                "dependent_component_name" : "METRICS_COLLECTOR",
-                "dependent_service_name" : "AMBARI_METRICS",
-                "scope" : "cluster",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            }
-          ]
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/AMBARI_METRICS/components/METRICS_MONITOR",
-          "StackServiceComponents" : {
-            "cardinality" : "ALL",
-            "component_category" : "SLAVE",
-            "component_name" : "METRICS_MONITOR",
-            "custom_commands" : [ ],
-            "display_name" : "Metrics Monitor",
-            "is_client" : false,
-            "is_master" : false,
-            "service_name" : "AMBARI_METRICS",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "auto_deploy" : {
-            "enabled" : true
-          },
-          "dependencies" : [ ]
-        }
-      ],
-      "artifacts" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/AMBARI_METRICS/artifacts/kerberos_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "kerberos_descriptor",
-            "service_name" : "AMBARI_METRICS",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/AMBARI_METRICS/artifacts/metrics_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "metrics_descriptor",
-            "service_name" : "AMBARI_METRICS",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/FALCON",
-      "StackServices" : {
-        "comments" : "Data management and processing platform",
-        "custom_commands" : [ ],
-        "display_name" : "Falcon",
-        "required_services" : [
-          "OOZIE"
-        ],
-        "service_check_supported" : true,
-        "service_name" : "FALCON",
-        "service_version" : "0.5.0.2.1",
-        "stack_name" : "HDP",
-        "stack_version" : "2.1",
-        "user_name" : null,
-        "config_types" : {
-          "falcon-env" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "falcon-runtime.properties" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "falcon-startup.properties" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          }
-        },
-        "kerberos_descriptor" : {
-          "components" : [
-            {
-              "identities" : [
-                {
-                  "principal" : {
-                    "configuration" : "falcon-startup.properties/*.falcon.service.authentication.kerberos.principal",
-                    "type" : "service",
-                    "local_username" : "${falcon-env/falcon_user}",
-                    "value" : "falcon/_HOST@${realm}"
-                  },
-                  "name" : "falcon_server",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : "r",
-                      "name" : "${falcon-env/falcon_user}"
-                    },
-                    "file" : "${keytab_dir}/falcon.service.keytab",
-                    "configuration" : "falcon-startup.properties/*.falcon.service.authentication.kerberos.keytab",
-                    "group" : {
-                      "access" : "",
-                      "name" : "${cluster-env/user_group}"
-                    }
-                  }
-                },
-                {
-                  "principal" : {
-                    "configuration" : "falcon-startup.properties/*.falcon.http.authentication.kerberos.principal",
-                    "type" : "service",
-                    "local_username" : null,
-                    "value" : "HTTP/_HOST@${realm}"
-                  },
-                  "name" : "/spnego",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : null,
-                      "name" : null
-                    },
-                    "file" : null,
-                    "configuration" : "falcon-startup.properties/*.falcon.http.authentication.kerberos.keytab",
-                    "group" : {
-                      "access" : null,
-                      "name" : null
-                    }
-                  }
-                }
-              ],
-              "name" : "FALCON_SERVER"
-            }
-          ],
-          "configurations" : [
-            {
-              "falcon-startup.properties" : {
-                "*.dfs.namenode.kerberos.principal" : "nn/_HOST@${realm}",
-                "*.falcon.http.authentication.type" : "kerberos",
-                "*.falcon.authentication.type" : "kerberos"
-              }
-            }
-          ],
-          "identities" : [
-            {
-              "name" : "/spnego"
-            },
-            {
-              "name" : "/smokeuser"
-            },
-            {
-              "name" : "/hdfs"
-            }
-          ],
-          "name" : "FALCON"
-        }
-      },
-      "components" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/FALCON/components/FALCON_CLIENT",
-          "StackServiceComponents" : {
-            "cardinality" : "1+",
-            "component_category" : "CLIENT",
-            "component_name" : "FALCON_CLIENT",
-            "custom_commands" : [ ],
-            "display_name" : "Falcon Client",
-            "is_client" : true,
-            "is_master" : false,
-            "service_name" : "FALCON",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [ ]
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/FALCON/components/FALCON_SERVER",
-          "StackServiceComponents" : {
-            "cardinality" : "1",
-            "component_category" : "MASTER",
-            "component_name" : "FALCON_SERVER",
-            "custom_commands" : [ ],
-            "display_name" : "Falcon Server",
-            "is_client" : false,
-            "is_master" : true,
-            "service_name" : "FALCON",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/FALCON/components/FALCON_SERVER/dependencies/OOZIE_CLIENT",
-              "Dependencies" : {
-                "component_name" : "OOZIE_CLIENT",
-                "dependent_component_name" : "FALCON_SERVER",
-                "dependent_service_name" : "FALCON",
-                "scope" : "cluster",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            },
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/FALCON/components/FALCON_SERVER/dependencies/OOZIE_SERVER",
-              "Dependencies" : {
-                "component_name" : "OOZIE_SERVER",
-                "dependent_component_name" : "FALCON_SERVER",
-                "dependent_service_name" : "FALCON",
-                "scope" : "cluster",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            }
-          ]
-        }
-      ],
-      "artifacts" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/FALCON/artifacts/kerberos_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "kerberos_descriptor",
-            "service_name" : "FALCON",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/FALCON/artifacts/metrics_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "metrics_descriptor",
-            "service_name" : "FALCON",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/FLUME",
-      "StackServices" : {
-        "comments" : "A distributed service for collecting, aggregating, and moving large amounts of streaming data into HDFS",
-        "custom_commands" : [ ],
-        "display_name" : "Flume",
-        "required_services" : [
-          "HDFS"
-        ],
-        "service_check_supported" : true,
-        "service_name" : "FLUME",
-        "service_version" : "1.4.0.2.1",
-        "stack_name" : "HDP",
-        "stack_version" : "2.1",
-        "user_name" : null,
-        "config_types" : {
-          "flume-conf" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "flume-env" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          }
-        }
-      },
-      "components" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/FLUME/components/FLUME_HANDLER",
-          "StackServiceComponents" : {
-            "cardinality" : "1+",
-            "component_category" : "SLAVE",
-            "component_name" : "FLUME_HANDLER",
-            "custom_commands" : [ ],
-            "display_name" : "Flume",
-            "is_client" : false,
-            "is_master" : false,
-            "service_name" : "FLUME",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [ ]
-        }
-      ],
-      "artifacts" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/FLUME/artifacts/metrics_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "metrics_descriptor",
-            "service_name" : "FLUME",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/GANGLIA",
-      "StackServices" : {
-        "comments" : "Ganglia Metrics Collection system (<a href=\"http://oss.oetiker.ch/rrdtool/\" target=\"_blank\">RRDTool</a> will be installed too)",
-        "custom_commands" : [ ],
-        "display_name" : "Ganglia",
-        "required_services" : [ ],
-        "service_check_supported" : false,
-        "service_name" : "GANGLIA",
-        "service_version" : "3.5.0",
-        "stack_name" : "HDP",
-        "stack_version" : "2.1",
-        "user_name" : null,
-        "config_types" : {
-          "ganglia-env" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          }
-        }
-      },
-      "components" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/GANGLIA/components/GANGLIA_MONITOR",
-          "StackServiceComponents" : {
-            "cardinality" : "ALL",
-            "component_category" : "SLAVE",
-            "component_name" : "GANGLIA_MONITOR",
-            "custom_commands" : [ ],
-            "display_name" : "Ganglia Monitor",
-            "is_client" : false,
-            "is_master" : false,
-            "service_name" : "GANGLIA",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "auto_deploy" : {
-            "enabled" : true
-          },
-          "dependencies" : [ ]
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/GANGLIA/components/GANGLIA_SERVER",
-          "StackServiceComponents" : {
-            "cardinality" : "1",
-            "component_category" : "MASTER",
-            "component_name" : "GANGLIA_SERVER",
-            "custom_commands" : [ ],
-            "display_name" : "Ganglia Server",
-            "is_client" : false,
-            "is_master" : true,
-            "service_name" : "GANGLIA",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [ ]
-        }
-      ],
-      "artifacts" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/GANGLIA/artifacts/metrics_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "metrics_descriptor",
-            "service_name" : "GANGLIA",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HBASE",
-      "StackServices" : {
-        "comments" : "Non-relational distributed database and centralized service for configuration management &\n        synchronization\n      ",
-        "custom_commands" : [ ],
-        "display_name" : "HBase",
-        "required_services" : [
-          "ZOOKEEPER",
-          "HDFS"
-        ],
-        "service_check_supported" : true,
-        "service_name" : "HBASE",
-        "service_version" : "0.98.0.2.1",
-        "stack_name" : "HDP",
-        "stack_version" : "2.1",
-        "user_name" : null,
-        "config_types" : {
-          "hbase-env" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "hbase-log4j" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "hbase-policy" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "true"
-            }
-          },
-          "hbase-site" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "true"
-            }
-          }
-        },
-        "kerberos_descriptor" : {
-          "components" : [
-            {
-              "identities" : [
-                {
-                  "principal" : {
-                    "configuration" : "hbase-site/hbase.regionserver.kerberos.principal",
-                    "type" : "service",
-                    "local_username" : "${hbase-env/hbase_user}",
-                    "value" : "hbase/_HOST@${realm}"
-                  },
-                  "name" : "hbase_regionserver_hbase",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : "r",
-                      "name" : "${hbase-env/hbase_user}"
-                    },
-                    "file" : "${keytab_dir}/hbase.service.keytab",
-                    "configuration" : "hbase-site/hbase.regionserver.keytab.file",
-                    "group" : {
-                      "access" : "",
-                      "name" : "${cluster-env/user_group}"
-                    }
-                  }
-                }
-              ],
-              "name" : "HBASE_REGIONSERVER"
-            },
-            {
-              "identities" : [
-                {
-                  "principal" : {
-                    "configuration" : "hbase-site/hbase.master.kerberos.principal",
-                    "type" : "service",
-                    "local_username" : "${hbase-env/hbase_user}",
-                    "value" : "hbase/_HOST@${realm}"
-                  },
-                  "name" : "hbase_master_hbase",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : "r",
-                      "name" : "${hbase-env/hbase_user}"
-                    },
-                    "file" : "${keytab_dir}/hbase.service.keytab",
-                    "configuration" : "hbase-site/hbase.master.keytab.file",
-                    "group" : {
-                      "access" : "",
-                      "name" : "${cluster-env/user_group}"
-                    }
-                  }
-                }
-              ],
-              "name" : "HBASE_MASTER"
-            }
-          ],
-          "configurations" : [
-            {
-              "hbase-site" : {
-                "hbase.coprocessor.master.classes" : "org.apache.hadoop.hbase.security.access.AccessController",
-                "hbase.security.authentication" : "kerberos",
-                "hbase.coprocessor.region.classes" : "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,org.apache.hadoop.hbase.security.access.AccessController",
-                "hbase.security.authorization" : "true",
-                "hbase.bulkload.staging.dir" : "/apps/hbase/staging",
-                "zookeeper.znode.parent" : "/hbase-secure"
-              }
-            }
-          ],
-          "identities" : [
-            {
-              "name" : "/spnego"
-            },
-            {
-              "name" : "/hdfs"
-            },
-            {
-              "principal" : {
-                "configuration" : "hbase-env/hbase_principal_name",
-                "type" : "user",
-                "local_username" : "${hbase-env/hbase_user}",
-                "value" : "${hbase-env/hbase_user}@${realm}"
-              },
-              "name" : "hbase",
-              "keytab" : {
-                "owner" : {
-                  "access" : "r",
-                  "name" : "${hbase-env/hbase_user}"
-                },
-                "file" : "${keytab_dir}/hbase.headless.keytab",
-                "configuration" : "hbase-env/hbase_user_keytab",
-                "group" : {
-                  "access" : "r",
-                  "name" : "${cluster-env/user_group}"
-                }
-              }
-            },
-            {
-              "name" : "/smokeuser"
-            }
-          ],
-          "name" : "HBASE"
-        }
-      },
-      "components" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HBASE/components/HBASE_CLIENT",
-          "StackServiceComponents" : {
-            "cardinality" : "1+",
-            "component_category" : "CLIENT",
-            "component_name" : "HBASE_CLIENT",
-            "custom_commands" : [ ],
-            "display_name" : "HBase Client",
-            "is_client" : true,
-            "is_master" : false,
-            "service_name" : "HBASE",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [ ]
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HBASE/components/HBASE_MASTER",
-          "StackServiceComponents" : {
-            "cardinality" : "1+",
-            "component_category" : "MASTER",
-            "component_name" : "HBASE_MASTER",
-            "custom_commands" : [
-              "DECOMMISSION"
-            ],
-            "display_name" : "HBase Master",
-            "is_client" : false,
-            "is_master" : true,
-            "service_name" : "HBASE",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HBASE/components/HBASE_MASTER/dependencies/HDFS_CLIENT",
-              "Dependencies" : {
-                "component_name" : "HDFS_CLIENT",
-                "dependent_component_name" : "HBASE_MASTER",
-                "dependent_service_name" : "HBASE",
-                "scope" : "host",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            },
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HBASE/components/HBASE_MASTER/dependencies/ZOOKEEPER_SERVER",
-              "Dependencies" : {
-                "component_name" : "ZOOKEEPER_SERVER",
-                "dependent_component_name" : "HBASE_MASTER",
-                "dependent_service_name" : "HBASE",
-                "scope" : "cluster",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            }
-          ]
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HBASE/components/HBASE_REGIONSERVER",
-          "StackServiceComponents" : {
-            "cardinality" : "1+",
-            "component_category" : "SLAVE",
-            "component_name" : "HBASE_REGIONSERVER",
-            "custom_commands" : [ ],
-            "display_name" : "RegionServer",
-            "is_client" : false,
-            "is_master" : false,
-            "service_name" : "HBASE",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [ ]
-        }
-      ],
-      "artifacts" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HBASE/artifacts/kerberos_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "kerberos_descriptor",
-            "service_name" : "HBASE",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HBASE/artifacts/metrics_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "metrics_descriptor",
-            "service_name" : "HBASE",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HBASE/artifacts/widgets_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "widgets_descriptor",
-            "service_name" : "HBASE",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS",
-      "StackServices" : {
-        "comments" : "Apache Hadoop Distributed File System",
-        "custom_commands" : [ ],
-        "display_name" : "HDFS",
-        "required_services" : [
-          "ZOOKEEPER"
-        ],
-        "service_check_supported" : true,
-        "service_name" : "HDFS",
-        "service_version" : "2.4.0.2.1",
-        "stack_name" : "HDP",
-        "stack_version" : "2.1",
-        "user_name" : null,
-        "config_types" : {
-          "core-site" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "true"
-            }
-          },
-          "hadoop-env" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "hadoop-policy" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "true"
-            }
-          },
-          "hdfs-log4j" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "hdfs-site" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "true"
-            }
-          }
-        },
-        "kerberos_descriptor" : {
-          "auth_to_local_properties" : [
-            "core-site/hadoop.security.auth_to_local"
-          ],
-          "components" : [
-            {
-              "identities" : [
-                {
-                  "principal" : {
-                    "configuration" : "hdfs-site/dfs.secondary.namenode.kerberos.principal",
-                    "type" : "service",
-                    "local_username" : "${hadoop-env/hdfs_user}",
-                    "value" : "nn/_HOST@${realm}"
-                  },
-                  "name" : "secondary_namenode_nn",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : "r",
-                      "name" : "${hadoop-env/hdfs_user}"
-                    },
-                    "file" : "${keytab_dir}/nn.service.keytab",
-                    "configuration" : "hdfs-site/dfs.secondary.namenode.keytab.file",
-                    "group" : {
-                      "access" : "",
-                      "name" : "${cluster-env/user_group}"
-                    }
-                  }
-                },
-                {
-                  "principal" : {
-                    "configuration" : "hdfs-site/dfs.secondary.namenode.kerberos.internal.spnego.principal",
-                    "type" : "service",
-                    "local_username" : null,
-                    "value" : null
-                  },
-                  "name" : "/spnego"
-                }
-              ],
-              "name" : "SECONDARY_NAMENODE"
-            },
-            {
-              "identities" : [
-                {
-                  "principal" : {
-                    "configuration" : "hdfs-site/dfs.datanode.kerberos.principal",
-                    "type" : "service",
-                    "local_username" : "${hadoop-env/hdfs_user}",
-                    "value" : "dn/_HOST@${realm}"
-                  },
-                  "name" : "datanode_dn",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : "r",
-                      "name" : "${hadoop-env/hdfs_user}"
-                    },
-                    "file" : "${keytab_dir}/dn.service.keytab",
-                    "configuration" : "hdfs-site/dfs.datanode.keytab.file",
-                    "group" : {
-                      "access" : "",
-                      "name" : "${cluster-env/user_group}"
-                    }
-                  }
-                }
-              ],
-              "configurations" : [
-                {
-                  "hdfs-site" : {
-                    "dfs.datanode.address" : "0.0.0.0:1019",
-                    "dfs.datanode.http.address" : "0.0.0.0:1022"
-                  }
-                }
-              ],
-              "name" : "DATANODE"
-            },
-            {
-              "identities" : [
-                {
-                  "principal" : {
-                    "configuration" : "hdfs-site/nfs.kerberos.principal",
-                    "type" : "service",
-                    "local_username" : "${hadoop-env/hdfs_user}",
-                    "value" : "nfs/_HOST@${realm}"
-                  },
-                  "name" : "nfsgateway",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : "r",
-                      "name" : "${hadoop-env/hdfs_user}"
-                    },
-                    "file" : "${keytab_dir}/nfs.service.keytab",
-                    "configuration" : "hdfs-site/nfs.keytab.file",
-                    "group" : {
-                      "access" : "",
-                      "name" : "${cluster-env/user_group}"
-                    }
-                  }
-                }
-              ],
-              "name" : "NFS_GATEWAY"
-            },
-            {
-              "identities" : [
-                {
-                  "principal" : {
-                    "configuration" : "hdfs-site/dfs.journalnode.kerberos.principal",
-                    "type" : "service",
-                    "local_username" : "${hadoop-env/hdfs_user}",
-                    "value" : "jn/_HOST@${realm}"
-                  },
-                  "name" : "journalnode_jn",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : "r",
-                      "name" : "${hadoop-env/hdfs_user}"
-                    },
-                    "file" : "${keytab_dir}/jn.service.keytab",
-                    "configuration" : "hdfs-site/dfs.journalnode.keytab.file",
-                    "group" : {
-                      "access" : "",
-                      "name" : "${cluster-env/user_group}"
-                    }
-                  }
-                },
-                {
-                  "principal" : {
-                    "configuration" : "hdfs-site/dfs.journalnode.kerberos.internal.spnego.principal",
-                    "type" : "service",
-                    "local_username" : null,
-                    "value" : null
-                  },
-                  "name" : "/spnego"
-                }
-              ],
-              "name" : "JOURNALNODE"
-            },
-            {
-              "identities" : [
-                {
-                  "principal" : {
-                    "configuration" : "hdfs-site/dfs.namenode.kerberos.principal",
-                    "type" : "service",
-                    "local_username" : "${hadoop-env/hdfs_user}",
-                    "value" : "nn/_HOST@${realm}"
-                  },
-                  "name" : "namenode_nn",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : "r",
-                      "name" : "${hadoop-env/hdfs_user}"
-                    },
-                    "file" : "${keytab_dir}/nn.service.keytab",
-                    "configuration" : "hdfs-site/dfs.namenode.keytab.file",
-                    "group" : {
-                      "access" : "",
-                      "name" : "${cluster-env/user_group}"
-                    }
-                  }
-                },
-                {
-                  "principal" : {
-                    "configuration" : "hdfs-site/dfs.namenode.kerberos.internal.spnego.principal",
-                    "type" : "service",
-                    "local_username" : null,
-                    "value" : null
-                  },
-                  "name" : "/spnego"
-                }
-              ],
-              "configurations" : [
-                {
-                  "hdfs-site" : {
-                    "dfs.block.access.token.enable" : "true"
-                  }
-                }
-              ],
-              "name" : "NAMENODE"
-            }
-          ],
-          "configurations" : [
-            {
-              "core-site" : {
-                "hadoop.http.authentication.cookie.domain" : "",
-                "hadoop.security.authentication" : "kerberos",
-                "hadoop.http.authentication.signer.secret.provider.object" : "",
-                "hadoop.http.authentication.kerberos.name.rules" : "",
-                "hadoop.security.auth_to_local" : "",
-                "hadoop.http.authentication.token.validity" : "",
-                "hadoop.rpc.protection" : "authentication",
-                "hadoop.http.authentication.cookie.path" : "",
-                "hadoop.security.authorization" : "true",
-                "hadoop.http.authentication.type" : "simple",
-                "hadoop.http.authentication.signature.secret.file" : "",
-                "hadoop.http.authentication.signature.secret" : "",
-                "hadoop.http.filter.initializers" : "",
-                "hadoop.http.authentication.signer.secret.provider" : "",
-                "hadoop.proxyuser.HTTP.groups" : "${hadoop-env/proxyuser_group}"
-              }
-            }
-          ],
-          "identities" : [
-            {
-              "principal" : {
-                "configuration" : "hdfs-site/dfs.web.authentication.kerberos.principal",
-                "type" : "service",
-                "local_username" : null,
-                "value" : null
-              },
-              "name" : "/spnego",
-              "keytab" : {
-                "owner" : {
-                  "access" : null,
-                  "name" : null
-                },
-                "file" : null,
-                "configuration" : "hdfs-site/dfs.web.authentication.kerberos.keytab",
-                "group" : {
-                  "access" : null,
-                  "name" : null
-                }
-              }
-            },
-            {
-              "name" : "/smokeuser"
-            },
-            {
-              "name" : "/hdfs"
-            }
-          ],
-          "name" : "HDFS"
-        }
-      },
-      "components" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS/components/DATANODE",
-          "StackServiceComponents" : {
-            "cardinality" : "1+",
-            "component_category" : "SLAVE",
-            "component_name" : "DATANODE",
-            "custom_commands" : [ ],
-            "display_name" : "DataNode",
-            "is_client" : false,
-            "is_master" : false,
-            "service_name" : "HDFS",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [ ]
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS/components/HDFS_CLIENT",
-          "StackServiceComponents" : {
-            "cardinality" : "1+",
-            "component_category" : "CLIENT",
-            "component_name" : "HDFS_CLIENT",
-            "custom_commands" : [ ],
-            "display_name" : "HDFS Client",
-            "is_client" : true,
-            "is_master" : false,
-            "service_name" : "HDFS",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [ ]
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS/components/JOURNALNODE",
-          "StackServiceComponents" : {
-            "cardinality" : "0+",
-            "component_category" : "SLAVE",
-            "component_name" : "JOURNALNODE",
-            "custom_commands" : [ ],
-            "display_name" : "JournalNode",
-            "is_client" : false,
-            "is_master" : false,
-            "service_name" : "HDFS",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS/components/JOURNALNODE/dependencies/HDFS_CLIENT",
-              "Dependencies" : {
-                "component_name" : "HDFS_CLIENT",
-                "dependent_component_name" : "JOURNALNODE",
-                "dependent_service_name" : "HDFS",
-                "scope" : "host",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            }
-          ]
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS/components/NAMENODE",
-          "StackServiceComponents" : {
-            "cardinality" : "1-2",
-            "component_category" : "MASTER",
-            "component_name" : "NAMENODE",
-            "custom_commands" : [
-              "DECOMMISSION",
-              "REBALANCEHDFS"
-            ],
-            "display_name" : "NameNode",
-            "is_client" : false,
-            "is_master" : true,
-            "service_name" : "HDFS",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [ ]
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS/components/SECONDARY_NAMENODE",
-          "StackServiceComponents" : {
-            "cardinality" : "1",
-            "component_category" : "MASTER",
-            "component_name" : "SECONDARY_NAMENODE",
-            "custom_commands" : [ ],
-            "display_name" : "SNameNode",
-            "is_client" : false,
-            "is_master" : true,
-            "service_name" : "HDFS",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [ ]
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS/components/ZKFC",
-          "StackServiceComponents" : {
-            "cardinality" : "0+",
-            "component_category" : "SLAVE",
-            "component_name" : "ZKFC",
-            "custom_commands" : [ ],
-            "display_name" : "ZKFailoverController",
-            "is_client" : false,
-            "is_master" : false,
-            "service_name" : "HDFS",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [ ]
-        }
-      ],
-      "artifacts" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS/artifacts/kerberos_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "kerberos_descriptor",
-            "service_name" : "HDFS",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS/artifacts/metrics_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "metrics_descriptor",
-            "service_name" : "HDFS",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS/artifacts/widgets_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "widgets_descriptor",
-            "service_name" : "HDFS",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE",
-      "StackServices" : {
-        "comments" : "Data warehouse system for ad-hoc queries & analysis of large datasets and table & storage management service",
-        "custom_commands" : [ ],
-        "display_name" : "Hive",
-        "required_services" : [
-          "ZOOKEEPER",
-          "YARN",
-          "TEZ"
-        ],
-        "service_check_supported" : true,
-        "service_name" : "HIVE",
-        "service_version" : "0.13.0.2.1",
-        "stack_name" : "HDP",
-        "stack_version" : "2.1",
-        "user_name" : null,
-        "config_types" : {
-          "hcat-env" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "hive-env" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "hive-exec-log4j" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "hive-log4j" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "hive-site" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "true"
-            }
-          },
-          "webhcat-env" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "webhcat-log4j" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "webhcat-site" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "true"
-            }
-          }
-        },
-        "kerberos_descriptor" : {
-          "components" : [
-            {
-              "identities" : [
-                {
-                  "principal" : {
-                    "configuration" : "hive-site/hive.server2.authentication.kerberos.principal",
-                    "type" : "service",
-                    "local_username" : "${hive-env/hive_user}",
-                    "value" : "hive/_HOST@${realm}"
-                  },
-                  "name" : "hive_server_hive",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : "r",
-                      "name" : "${hive-env/hive_user}"
-                    },
-                    "file" : "${keytab_dir}/hive.service.keytab",
-                    "configuration" : "hive-site/hive.server2.authentication.kerberos.keytab",
-                    "group" : {
-                      "access" : "",
-                      "name" : "${cluster-env/user_group}"
-                    }
-                  }
-                },
-                {
-                  "principal" : {
-                    "configuration" : "hive-site/hive.server2.authentication.spnego.principal",
-                    "type" : "service",
-                    "local_username" : null,
-                    "value" : null
-                  },
-                  "name" : "/spnego",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : null,
-                      "name" : null
-                    },
-                    "file" : null,
-                    "configuration" : "hive-site/hive.server2.authentication.spnego.keytab",
-                    "group" : {
-                      "access" : null,
-                      "name" : null
-                    }
-                  }
-                }
-              ],
-              "name" : "HIVE_SERVER"
-            },
-            {
-              "identities" : [
-                {
-                  "principal" : {
-                    "configuration" : "hive-site/hive.metastore.kerberos.principal",
-                    "type" : "service",
-                    "local_username" : "${hive-env/hive_user}",
-                    "value" : "hive/_HOST@${realm}"
-                  },
-                  "name" : "hive_metastore_hive",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : "r",
-                      "name" : "${hive-env/hive_user}"
-                    },
-                    "file" : "${keytab_dir}/hive.service.keytab",
-                    "configuration" : "hive-site/hive.metastore.kerberos.keytab.file",
-                    "group" : {
-                      "access" : "",
-                      "name" : "${cluster-env/user_group}"
-                    }
-                  }
-                }
-              ],
-              "name" : "HIVE_METASTORE"
-            },
-            {
-              "identities" : [
-                {
-                  "principal" : {
-                    "configuration" : "webhcat-site/templeton.kerberos.principal",
-                    "type" : "service",
-                    "local_username" : null,
-                    "value" : null
-                  },
-                  "name" : "/spnego",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : null,
-                      "name" : null
-                    },
-                    "file" : null,
-                    "configuration" : "webhcat-site/templeton.kerberos.keytab",
-                    "group" : {
-                      "access" : null,
-                      "name" : null
-                    }
-                  }
-                }
-              ],
-              "name" : "WEBHCAT_SERVER"
-            }
-          ],
-          "configurations" : [
-            {
-              "hive-site" : {
-                "hive.metastore.sasl.enabled" : "true",
-                "hive.server2.authentication" : "KERBEROS",
-                "hive.security.authorization.enabled" : "true"
-              }
-            },
-            {
-              "webhcat-site" : {
-                "templeton.hive.properties" : "hive.metastore.local=false,hive.metastore.uris=thrift://${clusterHostInfo/hive_metastore_host}:9083,hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/_HOST@${realm}",
-                "templeton.kerberos.secret" : "secret"
-              }
-            },
-            {
-              "core-site" : {
-                "hadoop.proxyuser.HTTP.hosts" : "${clusterHostInfo/webhcat_server_host}"
-              }
-            }
-          ],
-          "identities" : [
-            {
-              "name" : "/spnego"
-            },
-            {
-              "name" : "/smokeuser"
-            }
-          ],
-          "name" : "HIVE"
-        }
-      },
-      "components" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/HCAT",
-          "StackServiceComponents" : {
-            "cardinality" : null,
-            "component_category" : "CLIENT",
-            "component_name" : "HCAT",
-            "custom_commands" : [ ],
-            "display_name" : "HCat Client",
-            "is_client" : true,
-            "is_master" : false,
-            "service_name" : "HIVE",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [ ]
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/HIVE_CLIENT",
-          "StackServiceComponents" : {
-            "cardinality" : "1+",
-            "component_category" : "CLIENT",
-            "component_name" : "HIVE_CLIENT",
-            "custom_commands" : [ ],
-            "display_name" : "Hive Client",
-            "is_client" : true,
-            "is_master" : false,
-            "service_name" : "HIVE",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [ ]
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/HIVE_METASTORE",
-          "StackServiceComponents" : {
-            "cardinality" : "1",
-            "component_category" : "MASTER",
-            "component_name" : "HIVE_METASTORE",
-            "custom_commands" : [ ],
-            "display_name" : "Hive Metastore",
-            "is_client" : false,
-            "is_master" : true,
-            "service_name" : "HIVE",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "auto_deploy" : {
-            "enabled" : true,
-            "location" : "HIVE/HIVE_SERVER"
-          },
-          "dependencies" : [ ]
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/HIVE_SERVER",
-          "StackServiceComponents" : {
-            "cardinality" : "1",
-            "component_category" : "MASTER",
-            "component_name" : "HIVE_SERVER",
-            "custom_commands" : [ ],
-            "display_name" : "HiveServer2",
-            "is_client" : false,
-            "is_master" : true,
-            "service_name" : "HIVE",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/HIVE_SERVER/dependencies/MAPREDUCE2_CLIENT",
-              "Dependencies" : {
-                "component_name" : "MAPREDUCE2_CLIENT",
-                "dependent_component_name" : "HIVE_SERVER",
-                "dependent_service_name" : "HIVE",
-                "scope" : "host",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            },
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/HIVE_SERVER/dependencies/TEZ_CLIENT",
-              "Dependencies" : {
-                "component_name" : "TEZ_CLIENT",
-                "dependent_component_name" : "HIVE_SERVER",
-                "dependent_service_name" : "HIVE",
-                "scope" : "host",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            },
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/HIVE_SERVER/dependencies/YARN_CLIENT",
-              "Dependencies" : {
-                "component_name" : "YARN_CLIENT",
-                "dependent_component_name" : "HIVE_SERVER",
-                "dependent_service_name" : "HIVE",
-                "scope" : "host",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            },
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/HIVE_SERVER/dependencies/ZOOKEEPER_SERVER",
-              "Dependencies" : {
-                "component_name" : "ZOOKEEPER_SERVER",
-                "dependent_component_name" : "HIVE_SERVER",
-                "dependent_service_name" : "HIVE",
-                "scope" : "cluster",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            }
-          ]
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/MYSQL_SERVER",
-          "StackServiceComponents" : {
-            "cardinality" : "0-1",
-            "component_category" : "MASTER",
-            "component_name" : "MYSQL_SERVER",
-            "custom_commands" : [
-              "CLEAN"
-            ],
-            "display_name" : "MySQL Server",
-            "is_client" : false,
-            "is_master" : true,
-            "service_name" : "HIVE",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [ ]
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/WEBHCAT_SERVER",
-          "StackServiceComponents" : {
-            "cardinality" : "1",
-            "component_category" : "MASTER",
-            "component_name" : "WEBHCAT_SERVER",
-            "custom_commands" : [ ],
-            "display_name" : "WebHCat Server",
-            "is_client" : false,
-            "is_master" : true,
-            "service_name" : "HIVE",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/WEBHCAT_SERVER/dependencies/HDFS_CLIENT",
-              "Dependencies" : {
-                "component_name" : "HDFS_CLIENT",
-                "dependent_component_name" : "WEBHCAT_SERVER",
-                "dependent_service_name" : "HIVE",
-                "scope" : "host",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            },
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/WEBHCAT_SERVER/dependencies/MAPREDUCE2_CLIENT",
-              "Dependencies" : {
-                "component_name" : "MAPREDUCE2_CLIENT",
-                "dependent_component_name" : "WEBHCAT_SERVER",
-                "dependent_service_name" : "HIVE",
-                "scope" : "host",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            },
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/WEBHCAT_SERVER/dependencies/PIG",
-              "Dependencies" : {
-                "component_name" : "PIG",
-                "dependent_component_name" : "WEBHCAT_SERVER",
-                "dependent_service_name" : "HIVE",
-                "scope" : "host",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            },
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/WEBHCAT_SERVER/dependencies/YARN_CLIENT",
-              "Dependencies" : {
-                "component_name" : "YARN_CLIENT",
-                "dependent_component_name" : "WEBHCAT_SERVER",
-                "dependent_service_name" : "HIVE",
-                "scope" : "host",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            },
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/WEBHCAT_SERVER/dependencies/ZOOKEEPER_CLIENT",
-              "Dependencies" : {
-                "component_name" : "ZOOKEEPER_CLIENT",
-                "dependent_component_name" : "WEBHCAT_SERVER",
-                "dependent_service_name" : "HIVE",
-                "scope" : "host",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            },
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/WEBHCAT_SERVER/dependencies/ZOOKEEPER_SERVER",
-              "Dependencies" : {
-                "component_name" : "ZOOKEEPER_SERVER",
-                "dependent_component_name" : "WEBHCAT_SERVER",
-                "dependent_service_name" : "HIVE",
-                "scope" : "cluster",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            }
-          ]
-        }
-      ],
-      "artifacts" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/artifacts/kerberos_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "kerberos_descriptor",
-            "service_name" : "HIVE",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/artifacts/metrics_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "metrics_descriptor",
-            "service_name" : "HIVE",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/KERBEROS",
-      "StackServices" : {
-        "comments" : "A computer network authentication protocol which works on\n        the basis of 'tickets' to allow nodes communicating over a\n        non-secure network to prove their identity to one another in a\n        secure manner.\n      ",
-        "custom_commands" : [ ],
-        "display_name" : "Kerberos",
-        "required_services" : [ ],
-        "service_check_supported" : true,
-        "service_name" : "KERBEROS",
-        "service_version" : "1.10.3-10",
-        "stack_name" : "HDP",
-        "stack_version" : "2.1",
-        "user_name" : null,
-        "config_types" : {
-          "kerberos-env" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "krb5-conf" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          }
-        },
-        "kerberos_descriptor" : {
-          "components" : [
-            {
-              "name" : "KERBEROS_CLIENT"
-            }
-          ],
-          "identities" : [
-            {
-              "name" : "/smokeuser"
-            }
-          ],
-          "name" : "KERBEROS"
-        }
-      },
-      "components" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/KERBEROS/components/KERBEROS_CLIENT",
-          "StackServiceComponents" : {
-            "cardinality" : "ALL",
-            "component_category" : "CLIENT",
-            "component_name" : "KERBEROS_CLIENT",
-            "custom_commands" : [
-              "SET_KEYTAB",
-              "REMOVE_KEYTAB"
-            ],
-            "display_name" : "Kerberos Client",
-            "is_client" : true,
-            "is_master" : false,
-            "service_name" : "KERBEROS",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "auto_deploy" : {
-            "enabled" : true
-          },
-          "dependencies" : [ ]
-        }
-      ],
-      "artifacts" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/KERBEROS/artifacts/kerberos_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "kerberos_descriptor",
-            "service_name" : "KERBEROS",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/KERBEROS/artifacts/metrics_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "metrics_descriptor",
-            "service_name" : "KERBEROS",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/MAPREDUCE2",
-      "StackServices" : {
-        "comments" : "Apache Hadoop NextGen MapReduce (YARN)",
-        "custom_commands" : [ ],
-        "display_name" : "MapReduce2",
-        "required_services" : [
-          "YARN"
-        ],
-        "service_check_supported" : true,
-        "service_name" : "MAPREDUCE2",
-        "service_version" : "2.1.0.2.0.6.0",
-        "stack_name" : "HDP",
-        "stack_version" : "2.1",
-        "user_name" : null,
-        "config_types" : {
-          "mapred-env" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "mapred-site" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "true"
-            }
-          }
-        },
-        "kerberos_descriptor" : {
-          "components" : [
-            {
-              "identities" : [
-                {
-                  "principal" : {
-                    "configuration" : "mapred-site/mapreduce.jobhistory.principal",
-                    "type" : "service",
-                    "local_username" : "${mapred-env/mapred_user}",
-                    "value" : "jhs/_HOST@${realm}"
-                  },
-                  "name" : "history_server_jhs",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : "r",
-                      "name" : "${mapred-env/mapred_user}"
-                    },
-                    "file" : "${keytab_dir}/jhs.service.keytab",
-                    "configuration" : "mapred-site/mapreduce.jobhistory.keytab",
-                    "group" : {
-                      "access" : "",
-                      "name" : "${cluster-env/user_group}"
-                    }
-                  }
-                },
-                {
-                  "principal" : {
-                    "configuration" : "mapred-site/mapreduce.jobhistory.webapp.spnego-principal",
-                    "type" : "service",
-                    "local_username" : null,
-                    "value" : null
-                  },
-                  "name" : "/spnego",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : null,
-                      "name" : null
-                    },
-                    "file" : null,
-                    "configuration" : "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file",
-                    "group" : {
-                      "access" : null,
-                      "name" : null
-                    }
-                  }
-                }
-              ],
-              "name" : "HISTORYSERVER"
-            }
-          ],
-          "identities" : [
-            {
-              "name" : "/spnego"
-            },
-            {
-              "name" : "/hdfs"
-            },
-            {
-              "name" : "/smokeuser"
-            }
-          ],
-          "name" : "MAPREDUCE2"
-        }
-      },
-      "components" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/MAPREDUCE2/components/HISTORYSERVER",
-          "StackServiceComponents" : {
-            "cardinality" : "1",
-            "component_category" : "MASTER",
-            "component_name" : "HISTORYSERVER",
-            "custom_commands" : [ ],
-            "display_name" : "History Server",
-            "is_client" : false,
-            "is_master" : true,
-            "service_name" : "MAPREDUCE2",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "auto_deploy" : {
-            "enabled" : true,
-            "location" : "YARN/RESOURCEMANAGER"
-          },
-          "dependencies" : [
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/MAPREDUCE2/components/HISTORYSERVER/dependencies/HDFS_CLIENT",
-              "Dependencies" : {
-                "component_name" : "HDFS_CLIENT",
-                "dependent_component_name" : "HISTORYSERVER",
-                "dependent_service_name" : "MAPREDUCE2",
-                "scope" : "host",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            }
-          ]
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/MAPREDUCE2/components/MAPREDUCE2_CLIENT",
-          "StackServiceComponents" : {
-            "cardinality" : "0+",
-            "component_category" : "CLIENT",
-            "component_name" : "MAPREDUCE2_CLIENT",
-            "custom_commands" : [ ],
-            "display_name" : "MapReduce2 Client",
-            "is_client" : true,
-            "is_master" : false,
-            "service_name" : "MAPREDUCE2",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [ ]
-        }
-      ],
-      "artifacts" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/MAPREDUCE2/artifacts/kerberos_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "kerberos_descriptor",
-            "service_name" : "MAPREDUCE2",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/MAPREDUCE2/artifacts/metrics_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "metrics_descriptor",
-            "service_name" : "MAPREDUCE2",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/MAPREDUCE2/artifacts/widgets_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "widgets_descriptor",
-            "service_name" : "MAPREDUCE2",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/OOZIE",
-      "StackServices" : {
-        "comments" : "System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the <a target=\"_blank\" href=\"http://www.sencha.com/legal/open-source-faq/\">ExtJS</a> Library.\n      ",
-        "custom_commands" : [ ],
-        "display_name" : "Oozie",
-        "required_services" : [
-          "YARN"
-        ],
-        "service_check_supported" : true,
-        "service_name" : "OOZIE",
-        "service_version" : "4.0.0.2.1",
-        "stack_name" : "HDP",
-        "stack_version" : "2.1",
-        "user_name" : null,
-        "config_types" : {
-          "oozie-env" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "oozie-log4j" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "oozie-site" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "true"
-            }
-          }
-        },
-        "kerberos_descriptor" : {
-          "auth_to_local_properties" : [
-            "oozie-site/oozie.authentication.kerberos.name.rules"
-          ],
-          "components" : [
-            {
-              "identities" : [
-                {
-                  "principal" : {
-                    "configuration" : "oozie-site/oozie.service.HadoopAccessorService.kerberos.principal",
-                    "type" : "service",
-                    "local_username" : "${oozie-env/oozie_user}",
-                    "value" : "oozie/_HOST@${realm}"
-                  },
-                  "name" : "oozie_server",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : "r",
-                      "name" : "${oozie-env/oozie_user}"
-                    },
-                    "file" : "${keytab_dir}/oozie.service.keytab",
-                    "configuration" : "oozie-site/oozie.service.HadoopAccessorService.keytab.file",
-                    "group" : {
-                      "access" : "",
-                      "name" : "${cluster-env/user_group}"
-                    }
-                  }
-                },
-                {
-                  "principal" : {
-                    "configuration" : "oozie-site/oozie.authentication.kerberos.principal",
-                    "type" : "service",
-                    "local_username" : null,
-                    "value" : null
-                  },
-                  "name" : "/spnego",
-                  "keytab" : {
-                    "owner" : {
-                      "access" : null,
-                      "name" : null
-                    },
-                    "file" : null,
-                    "configuration" : "oozie-site/oozie.authentication.kerberos.keytab",
-                    "group" : {
-                      "access" : null,
-                      "name" : null
-                    }
-                  }
-                }
-              ],
-              "name" : "OOZIE_SERVER"
-            }
-          ],
-          "configurations" : [
-            {
-              "oozie-site" : {
-                "oozie.service.HadoopAccessorService.kerberos.enabled" : "true",
-                "oozie.authentication.type" : "kerberos",
-                "oozie.authentication.kerberos.name.rules" : "",
-                "oozie.service.AuthorizationService.authorization.enabled" : "true",
-                "local.realm" : "${realm}"
-              }
-            }
-          ],
-          "identities" : [
-            {
-              "name" : "/spnego"
-            },
-            {
-              "name" : "/smokeuser"
-            },
-            {
-              "name" : "/hdfs"
-            }
-          ],
-          "name" : "OOZIE"
-        }
-      },
-      "components" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/OOZIE/components/OOZIE_CLIENT",
-          "StackServiceComponents" : {
-            "cardinality" : "1+",
-            "component_category" : "CLIENT",
-            "component_name" : "OOZIE_CLIENT",
-            "custom_commands" : [ ],
-            "display_name" : "Oozie Client",
-            "is_client" : true,
-            "is_master" : false,
-            "service_name" : "OOZIE",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/OOZIE/components/OOZIE_CLIENT/dependencies/HDFS_CLIENT",
-              "Dependencies" : {
-                "component_name" : "HDFS_CLIENT",
-                "dependent_component_name" : "OOZIE_CLIENT",
-                "dependent_service_name" : "OOZIE",
-                "scope" : "host",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            },
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/OOZIE/components/OOZIE_CLIENT/dependencies/MAPREDUCE2_CLIENT",
-              "Dependencies" : {
-                "component_name" : "MAPREDUCE2_CLIENT",
-                "dependent_component_name" : "OOZIE_CLIENT",
-                "dependent_service_name" : "OOZIE",
-                "scope" : "host",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            }
-          ]
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/OOZIE/components/OOZIE_SERVER",
-          "StackServiceComponents" : {
-            "cardinality" : "1",
-            "component_category" : "MASTER",
-            "component_name" : "OOZIE_SERVER",
-            "custom_commands" : [ ],
-            "display_name" : "Oozie Server",
-            "is_client" : false,
-            "is_master" : true,
-            "service_name" : "OOZIE",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/OOZIE/components/OOZIE_SERVER/dependencies/HDFS_CLIENT",
-              "Dependencies" : {
-                "component_name" : "HDFS_CLIENT",
-                "dependent_component_name" : "OOZIE_SERVER",
-                "dependent_service_name" : "OOZIE",
-                "scope" : "host",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            },
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/OOZIE/components/OOZIE_SERVER/dependencies/MAPREDUCE2_CLIENT",
-              "Dependencies" : {
-                "component_name" : "MAPREDUCE2_CLIENT",
-                "dependent_component_name" : "OOZIE_SERVER",
-                "dependent_service_name" : "OOZIE",
-                "scope" : "host",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            },
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/OOZIE/components/OOZIE_SERVER/dependencies/YARN_CLIENT",
-              "Dependencies" : {
-                "component_name" : "YARN_CLIENT",
-                "dependent_component_name" : "OOZIE_SERVER",
-                "dependent_service_name" : "OOZIE",
-                "scope" : "host",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            }
-          ]
-        }
-      ],
-      "artifacts" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/OOZIE/artifacts/kerberos_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "kerberos_descriptor",
-            "service_name" : "OOZIE",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        },
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/OOZIE/artifacts/metrics_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "metrics_descriptor",
-            "service_name" : "OOZIE",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/PIG",
-      "StackServices" : {
-        "comments" : "Scripting platform for analyzing large datasets",
-        "custom_commands" : [ ],
-        "display_name" : "Pig",
-        "required_services" : [
-          "YARN"
-        ],
-        "service_check_supported" : true,
-        "service_name" : "PIG",
-        "service_version" : "0.12.1.2.1",
-        "stack_name" : "HDP",
-        "stack_version" : "2.1",
-        "user_name" : null,
-        "config_types" : {
-          "pig-env" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "pig-log4j" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          },
-          "pig-properties" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          }
-        }
-      },
-      "components" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/PIG/components/PIG",
-          "StackServiceComponents" : {
-            "cardinality" : "0+",
-            "component_category" : "CLIENT",
-            "component_name" : "PIG",
-            "custom_commands" : [ ],
-            "display_name" : "Pig",
-            "is_client" : true,
-            "is_master" : false,
-            "service_name" : "PIG",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [ ]
-        }
-      ],
-      "artifacts" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/PIG/artifacts/metrics_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "metrics_descriptor",
-            "service_name" : "PIG",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          }
-        }
-      ]
-    },
-    {
-      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/SQOOP",
-      "StackServices" : {
-        "comments" : "Tool for transferring bulk data between Apache Hadoop and\n        structured data stores such as relational databases\n      ",
-        "custom_commands" : [ ],
-        "display_name" : "Sqoop",
-        "required_services" : [
-          "HDFS"
-        ],
-        "service_check_supported" : true,
-        "service_name" : "SQOOP",
-        "service_version" : "1.4.4.2.1",
-        "stack_name" : "HDP",
-        "stack_version" : "2.1",
-        "user_name" : null,
-        "config_types" : {
-          "sqoop-env" : {
-            "supports" : {
-              "adding_forbidden" : "false",
-              "do_not_extend" : "false",
-              "final" : "false"
-            }
-          }
-        }
-      },
-      "components" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/SQOOP/components/SQOOP",
-          "StackServiceComponents" : {
-            "cardinality" : "1+",
-            "component_category" : "CLIENT",
-            "component_name" : "SQOOP",
-            "custom_commands" : [ ],
-            "display_name" : "Sqoop",
-            "is_client" : true,
-            "is_master" : false,
-            "service_name" : "SQOOP",
-            "stack_name" : "HDP",
-            "stack_version" : "2.1"
-          },
-          "dependencies" : [
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/SQOOP/components/SQOOP/dependencies/HDFS_CLIENT",
-              "Dependencies" : {
-                "component_name" : "HDFS_CLIENT",
-                "dependent_component_name" : "SQOOP",
-                "dependent_service_name" : "SQOOP",
-                "scope" : "host",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            },
-            {
-              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/SQOOP/components/SQOOP/dependencies/MAPREDUCE2_CLIENT",
-              "Dependencies" : {
-                "component_name" : "MAPREDUCE2_CLIENT",
-                "dependent_component_name" : "SQOOP",
-                "dependent_service_name" : "SQOOP",
-                "scope" : "host",
-                "stack_name" : "HDP",
-                "stack_version" : "2.1"
-              }
-            }
-          ]
-        }
-      ],
-      "artifacts" : [
-        {
-          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/SQOOP/artifacts/metrics_descriptor",
-          "Artifacts" : {
-            "artifact_name" : "metrics_descriptor",
-            "se

<TRUNCATED>
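
The truncated JSON above is a stack-metadata fixture: it mirrors what the Ambari
stacks REST API returns for the HDP 2.1 services (HIVE, KERBEROS, MAPREDUCE2,
OOZIE, PIG, SQOOP). For orientation only, the same payload can be fetched from a
live Ambari server with a plain HTTP GET; this is a minimal sketch, assuming the
c6401 test host seen in the fixture hrefs and default admin:admin credentials,
neither of which comes from this commit:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

/** Minimal sketch: GET the HIVE service descriptor from the Ambari stacks API. */
public class StackServiceFetch {
  public static void main(String[] args) throws Exception {
    // Host and service path taken from the fixture hrefs above; credentials are assumed.
    URL url = new URL("http://c6401.ambari.apache.org:8080"
        + "/api/v1/stacks/HDP/versions/2.1/services/HIVE"
        + "?fields=StackServices/*,components/*");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    String auth = Base64.getEncoder()
        .encodeToString("admin:admin".getBytes(StandardCharsets.UTF_8));
    conn.setRequestProperty("Authorization", "Basic " + auth);
    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      reader.lines().forEach(System.out::println);
    }
  }
}

HttpURLConnection is used only to keep the sketch dependency-free; any HTTP
client works against the same endpoint.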

[37/50] [abbrv] ambari git commit: AMBARI-19574 - Add upgrade logic for the heap dump control option added in HDP 2.6 stack (jonathanhurley)

Posted by nc...@apache.org.
AMBARI-19574 - Add upgrade logic for the heap dump control option added in HDP 2.6 stack (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/410f2943
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/410f2943
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/410f2943

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 410f2943d5babf6b9bd3e2f225fbbe9ac5761ee2
Parents: 4dac278
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Mon Jan 16 20:26:24 2017 -0500
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue Jan 17 18:51:50 2017 -0500

----------------------------------------------------------------------
 .../serveraction/upgrades/ConfigureAction.java  | 142 +++++++++++---
 .../upgrade/ConfigUpgradeChangeDefinition.java  | 194 +++++++++++++------
 .../state/stack/upgrade/ConfigureTask.java      |  45 ++---
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml  |  27 +++
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml |  19 +-
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml     |  14 ++
 .../src/main/resources/upgrade-config.xsd       |  16 ++
 .../upgrades/ConfigureActionTest.java           | 147 +++++++++++---
 8 files changed, 455 insertions(+), 149 deletions(-)
----------------------------------------------------------------------
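
This commit adds an insert directive to the configuration upgrade framework,
alongside the existing set/transfer/replace directives, so upgrade packs can
append or prepend content to an existing property value. A hedged sketch of how
such a directive might look in config-upgrade.xml follows; the element and
attribute names are assumptions inferred from the Insert fields in
ConfigUpgradeChangeDefinition (key, value, insertType, newlineBefore,
newlineAfter), the authoritative shape is in the modified upgrade-config.xsd
(not reproduced in this excerpt), and the id and {{heap_dump_opts}} value are
illustrative placeholders:

<definition xsi:type="configure" id="hdp_2_6_heap_dump_example"
            summary="Append heap dump JVM options">
  <type>hadoop-env</type>
  <!-- Idempotent: ConfigureAction skips the insert when "content" already contains the value. -->
  <insert key="content" value="{{heap_dump_opts}}"
          insert-type="append" newline-before="true" newline-after="false"/>
</definition>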


http://git-wip-us.apache.org/repos/asf/ambari/blob/410f2943/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
index 97280ee..a42e667 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
@@ -46,6 +46,7 @@ import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.ConfigurationKeyValue;
+import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Insert;
 import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Masked;
 import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Replace;
 import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Transfer;
@@ -185,7 +186,7 @@ public class ConfigureAction extends AbstractServerAction {
     // such as hdfs-site or hbase-env
     String configType = commandParameters.get(ConfigureTask.PARAMETER_CONFIG_TYPE);
 
-    // extract transfers
+    // extract setters
     List<ConfigurationKeyValue> keyValuePairs = Collections.emptyList();
     String keyValuePairJson = commandParameters.get(ConfigureTask.PARAMETER_KEY_VALUE_PAIRS);
     if (null != keyValuePairJson) {
@@ -212,14 +213,22 @@ public class ConfigureAction extends AbstractServerAction {
       replacements = getAllowedReplacements(cluster, configType, replacements);
     }
 
+    // extract insertions
+    List<Insert> insertions = Collections.emptyList();
+    String insertJson = commandParameters.get(ConfigureTask.PARAMETER_INSERTIONS);
+    if (null != insertJson) {
+      insertions = m_gson.fromJson(
+          insertJson, new TypeToken<List<Insert>>(){}.getType());
+    }
+
     // if there is nothing to do, then skip the task
-    if (keyValuePairs.isEmpty() && transfers.isEmpty() && replacements.isEmpty()) {
-      String message = "cluster={0}, type={1}, transfers={2}, replacements={3}, configurations={4}";
+    if (keyValuePairs.isEmpty() && transfers.isEmpty() && replacements.isEmpty() && insertions.isEmpty()) {
+      String message = "cluster={0}, type={1}, transfers={2}, replacements={3}, insertions={4}, configurations={5}";
       message = MessageFormat.format(message, clusterName, configType, transfers, replacements,
-          keyValuePairs);
+          insertions, keyValuePairs);
 
       StringBuilder buffer = new StringBuilder(
-          "Skipping this configuration task since none of the conditions were met and there are no transfers or replacements").append("\n");
+          "Skipping this configuration task since none of the conditions were met and there are no transfers, replacements, or insertions.").append("\n");
 
       buffer.append(message);
 
@@ -229,9 +238,12 @@ public class ConfigureAction extends AbstractServerAction {
     // if only 1 of the required properties was null and no transfer properties,
     // then something went wrong
     if (null == clusterName || null == configType
-        || (keyValuePairs.isEmpty() && transfers.isEmpty() && replacements.isEmpty())) {
-      String message = "cluster={0}, type={1}, transfers={2}, replacements={3}, configurations={4}";
-      message = MessageFormat.format(message, clusterName, configType, transfers, replacements, keyValuePairs);
+        || (keyValuePairs.isEmpty() && transfers.isEmpty() && replacements.isEmpty() && insertions.isEmpty())) {
+      String message = "cluster={0}, type={1}, transfers={2}, replacements={3}, insertions={4}, configurations={5}";
+
+      message = MessageFormat.format(message, clusterName, configType, transfers, replacements,
+          insertions, keyValuePairs);
+
       return createCommandReport(0, HostRoleStatus.FAILED, "{}", "", message);
     }
 
@@ -251,7 +263,7 @@ public class ConfigureAction extends AbstractServerAction {
 
     // !!! initial reference values
     Map<String, String> base = config.getProperties();
-    Map<String, String> newValues = new HashMap<String, String>(base);
+    Map<String, String> newValues = new HashMap<>(base);
 
     boolean changedValues = false;
 
@@ -287,7 +299,7 @@ public class ConfigureAction extends AbstractServerAction {
                 case YAML_ARRAY: {
                   // turn c6401,c6402 into ['c6401','c6402']
                   String[] splitValues = StringUtils.split(valueToCopy, ',');
-                  List<String> quotedValues = new ArrayList<String>(splitValues.length);
+                  List<String> quotedValues = new ArrayList<>(splitValues.length);
                   for (String splitValue : splitValues) {
                     quotedValues.add("'" + StringUtils.trim(splitValue) + "'");
                   }
@@ -306,7 +318,8 @@ public class ConfigureAction extends AbstractServerAction {
             newValues.put(transfer.toKey, valueToCopy);
 
             // append standard output
-            outputBuffer.append(MessageFormat.format("Created {0}/{1} = \"{2}\"\n", configType,
+            updateBufferWithMessage(outputBuffer, MessageFormat.format("Created {0}/{1} = \"{2}\"",
+                configType,
                 transfer.toKey, mask(transfer, valueToCopy)));
           }
           break;
@@ -319,15 +332,17 @@ public class ConfigureAction extends AbstractServerAction {
             changedValues = true;
 
             // append standard output
-            outputBuffer.append(MessageFormat.format("Renamed {0}/{1} to {2}/{3}\n", configType,
+            updateBufferWithMessage(outputBuffer,
+                MessageFormat.format("Renamed {0}/{1} to {2}/{3}", configType,
                 transfer.fromKey, configType, transfer.toKey));
+
           } else if (StringUtils.isNotBlank(transfer.defaultValue)) {
             newValues.put(transfer.toKey, transfer.defaultValue);
             changedValues = true;
 
             // append standard output
-            outputBuffer.append(MessageFormat.format(
-                "Created {0}/{1} with default value \"{2}\"\n",
+            updateBufferWithMessage(outputBuffer,
+                MessageFormat.format("Created {0}/{1} with default value \"{2}\"",
                 configType, transfer.toKey, mask(transfer, transfer.defaultValue)));
           }
 
@@ -337,15 +352,16 @@ public class ConfigureAction extends AbstractServerAction {
             newValues.clear();
 
             // append standard output
-            outputBuffer.append(MessageFormat.format("Deleted all keys from {0}\n", configType));
+            updateBufferWithMessage(outputBuffer,
+                MessageFormat.format("Deleted all keys from {0}", configType));
 
             for (String keeper : transfer.keepKeys) {
               if (base.containsKey(keeper) && base.get(keeper) != null) {
                 newValues.put(keeper, base.get(keeper));
 
                 // append standard output
-                outputBuffer.append(MessageFormat.format("Preserved {0}/{1} after delete\n",
-                  configType, keeper));
+                updateBufferWithMessage(outputBuffer,
+                    MessageFormat.format("Preserved {0}/{1} after delete", configType, keeper));
               }
             }
 
@@ -358,7 +374,8 @@ public class ConfigureAction extends AbstractServerAction {
                 newValues.put(changed, base.get(changed));
 
                 // append standard output
-                outputBuffer.append(MessageFormat.format("Preserved {0}/{1} after delete\n",
+                updateBufferWithMessage(outputBuffer,
+                    MessageFormat.format("Preserved {0}/{1} after delete",
                     configType, changed));
               }
             }
@@ -369,7 +386,8 @@ public class ConfigureAction extends AbstractServerAction {
             changedValues = true;
 
             // append standard output
-            outputBuffer.append(MessageFormat.format("Deleted {0}/{1}\n", configType,
+            updateBufferWithMessage(outputBuffer,
+                MessageFormat.format("Deleted {0}/{1}", configType,
                 transfer.deleteKey));
           }
 
@@ -389,7 +407,8 @@ public class ConfigureAction extends AbstractServerAction {
           // !!! values are not changing, so make this a no-op
           if (null != oldValue && value.equals(oldValue)) {
             if (currentStack.equals(targetStack) && !changedValues) {
-              outputBuffer.append(MessageFormat.format(
+              updateBufferWithMessage(outputBuffer,
+                  MessageFormat.format(
                   "{0}/{1} for cluster {2} would not change, skipping setting", configType, key,
                   clusterName));
 
@@ -409,40 +428,91 @@ public class ConfigureAction extends AbstractServerAction {
           if (StringUtils.isEmpty(value)) {
             message = MessageFormat.format("{0}/{1} changed to an empty value", configType, key);
           } else {
-            message = MessageFormat.format("{0}/{1} changed to \"{2}\"\n", configType, key,
+            message = MessageFormat.format("{0}/{1} changed to \"{2}\"", configType, key,
                 mask(keyValuePair, value));
           }
 
-          outputBuffer.append(message);
+          updateBufferWithMessage(outputBuffer, message);
         }
       }
     }
 
-    // !!! string replacements happen only on the new values.
+    // replacements happen only on the new values (as they are initialized from
+    // the existing pre-upgrade values)
     for (Replace replacement : replacements) {
       // the key might exist but might be null, so we need to check this
       // condition when replacing a part of the value
       String toReplace = newValues.get(replacement.key);
       if (StringUtils.isNotBlank(toReplace)) {
         if (!toReplace.contains(replacement.find)) {
-          outputBuffer.append(MessageFormat.format("String \"{0}\" was not found in {1}/{2}\n",
+          updateBufferWithMessage(outputBuffer,
+              MessageFormat.format("String \"{0}\" was not found in {1}/{2}",
               replacement.find, configType, replacement.key));
         } else {
           String replaced = StringUtils.replace(toReplace, replacement.find, replacement.replaceWith);
 
           newValues.put(replacement.key, replaced);
 
-          outputBuffer.append(
+          updateBufferWithMessage(outputBuffer,
               MessageFormat.format("Replaced {0}/{1} containing \"{2}\" with \"{3}\"", configType,
                   replacement.key, replacement.find, replacement.replaceWith));
-
-          outputBuffer.append(System.lineSeparator());
         }
       } else {
-        outputBuffer.append(MessageFormat.format(
+        updateBufferWithMessage(outputBuffer, MessageFormat.format(
             "Skipping replacement for {0}/{1} because it does not exist or is empty.",
             configType, replacement.key));
-        outputBuffer.append(System.lineSeparator());
+      }
+    }
+
+    // insertions happen only on the new values (as they are initialized from
+    // the existing pre-upgrade values)
+    for (Insert insert : insertions) {
+      String valueToInsertInto = newValues.get(insert.key);
+
+      // if the key doesn't exist, then do no work
+      if (StringUtils.isNotBlank(valueToInsertInto)) {
+        // make this insertion idempotent - don't do it if the value already
+        // contains the content
+        if (StringUtils.contains(valueToInsertInto, insert.value)) {
+          updateBufferWithMessage(outputBuffer,
+              MessageFormat.format("Skipping insertion for {0}/{1} because it already contains {2}",
+                  configType, insert.key, insert.value));
+
+          continue;
+        }
+
+        // new line work
+        String valueToInsert = insert.value;
+        if (insert.newlineBefore) {
+          valueToInsert = System.lineSeparator() + valueToInsert;
+        }
+
+        // new line work
+        if (insert.newlineAfter) {
+          valueToInsert = valueToInsert + System.lineSeparator();
+        }
+
+        switch (insert.insertType) {
+          case APPEND:
+            valueToInsertInto = valueToInsertInto + valueToInsert;
+            break;
+          case PREPEND:
+            valueToInsertInto = valueToInsert + valueToInsertInto;
+            break;
+          default:
+            LOG.error("Unable to insert {0}/{1} with unknown insertion type of {2}", configType,
+                insert.key, insert.insertType);
+            break;
+        }
+
+        newValues.put(insert.key, valueToInsertInto);
+
+        updateBufferWithMessage(outputBuffer, MessageFormat.format(
+            "Updated {0}/{1} by inserting {2}", configType, insert.key, insert.value));
+      } else {
+        updateBufferWithMessage(outputBuffer, MessageFormat.format(
+            "Skipping insertion for {0}/{1} because it does not exist or is empty.", configType,
+            insert.key));
       }
     }
 
@@ -492,7 +562,7 @@ public class ConfigureAction extends AbstractServerAction {
    */
   private List<String> findValuesToPreserve(String clusterName, Config config)
       throws AmbariException {
-    List<String> result = new ArrayList<String>();
+    List<String> result = new ArrayList<>();
 
     Map<String, Map<String, ThreeWayValue>> conflicts =
         m_mergeHelper.getConflicts(clusterName, config.getStackId());
@@ -519,7 +589,7 @@ public class ConfigureAction extends AbstractServerAction {
     // iterate over all properties for every cluster service; if the property
     // has the correct config type (ie oozie-site or hdfs-site) then add it to
     // the list of original stack properties
-    Set<String> stackPropertiesForType = new HashSet<String>(50);
+    Set<String> stackPropertiesForType = new HashSet<>(50);
     for (String serviceName : cluster.getServices().keySet()) {
       Set<PropertyInfo> serviceProperties = m_ambariMetaInfo.get().getServiceProperties(
           oldStack.getStackName(), oldStack.getStackVersion(), serviceName);
@@ -700,4 +770,14 @@ public class ConfigureAction extends AbstractServerAction {
 
     return config.getProperties().get(propertyKey);
   }
+
+  /**
+   * Appends the buffer with the message as well as a newline.
+   *
+   * @param buffer
+   * @param message
+   */
+  private void updateBufferWithMessage(StringBuilder buffer, String message) {
+    buffer.append(message).append(System.lineSeparator());
+  }
 }
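
The insertion loop above is deliberately idempotent: an insert is skipped when
the key is absent or blank, or when the value already contains the content, and
optional newlines are attached before the APPEND or PREPEND is applied. A
self-contained sketch of that core semantics (a hypothetical helper, not the
Ambari class itself):

/** Hedged sketch of the idempotent insert semantics implemented above. */
final class InsertSketch {
  enum InsertType { APPEND, PREPEND }

  /** Returns the updated value, or the input unchanged when there is nothing to do. */
  static String apply(String existing, String content, InsertType type,
                      boolean newlineBefore, boolean newlineAfter) {
    // No-op when there is nothing to insert into, or the insert was already applied.
    if (existing == null || existing.trim().isEmpty() || existing.contains(content)) {
      return existing;
    }
    String toInsert = content;
    if (newlineBefore) {
      toInsert = System.lineSeparator() + toInsert;
    }
    if (newlineAfter) {
      toInsert = toInsert + System.lineSeparator();
    }
    return type == InsertType.APPEND ? existing + toInsert : toInsert + existing;
  }

  public static void main(String[] args) {
    // The JVM flag is an illustrative value in the spirit of the heap dump option this commit targets.
    String content = "export HADOOP_OPTS=\"$HADOOP_OPTS -XX:+HeapDumpOnOutOfMemoryError\"";
    String once = apply("# existing env", content, InsertType.APPEND, true, false);
    // Prints true: the second application is a no-op because the content is already present.
    System.out.println(once.equals(apply(once, content, InsertType.APPEND, true, false)));
  }
}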

http://git-wip-us.apache.org/repos/asf/ambari/blob/410f2943/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigUpgradeChangeDefinition.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigUpgradeChangeDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigUpgradeChangeDefinition.java
index 31df790..5c0fba7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigUpgradeChangeDefinition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigUpgradeChangeDefinition.java
@@ -28,6 +28,8 @@ import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlAttribute;
 import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlEnum;
+import javax.xml.bind.annotation.XmlEnumValue;
 import javax.xml.bind.annotation.XmlRootElement;
 import javax.xml.bind.annotation.XmlType;
 
@@ -36,7 +38,7 @@ import org.apache.ambari.server.state.Config;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.gson.Gson;
+import com.google.common.base.Objects;
 
 /**
  * The {@link ConfigUpgradeChangeDefinition} represents a configuration change. This change can be
@@ -109,10 +111,6 @@ public class ConfigUpgradeChangeDefinition {
 
   public static final Float DEFAULT_PRIORITY = 1.0f;
 
-  /**
-   * Gson
-   */
-  private Gson m_gson = new Gson();
 
   /**
    * An optional brief description of config changes.
@@ -137,6 +135,17 @@ public class ConfigUpgradeChangeDefinition {
 
   @XmlElement(name="regex-replace")
   private List<RegexReplace> regexReplacements;
+  /**
+   * Insert new content into an existing value by either prepending or
+   * appending. Each {@link Insert} will only run if:
+   * <ul>
+   * <li>The key specified by {@link Insert#key} exists.
+   * <li>The content specified by {@link Insert#value} is not found in the key's
+   * existing content.
+   * </ul>
+   */
+  @XmlElement(name = "insert")
+  private List<Insert> inserts;
 
   /**
    * @return the config type
@@ -250,6 +259,19 @@ public class ConfigUpgradeChangeDefinition {
 
 
   /**
+   * Gets the insertion directives.
+   *
+   * @return the inserts, or an empty list (never {@code null}).
+   */
+  public List<Insert> getInsertions() {
+    if (null == inserts) {
+      return Collections.emptyList();
+    }
+
+    return inserts;
+  }
+
+  /**
    * Used for configuration updates that should mask their values from being
    * printed in plain text.
    */
@@ -298,14 +320,12 @@ public class ConfigUpgradeChangeDefinition {
 
     @Override
     public String toString() {
-      return "Set{" +
-              ", key='" + key + '\'' +
-              ", value='" + value + '\'' +
-              ", ifKey='" + ifKey + '\'' +
-              ", ifType='" + ifType + '\'' +
-              ", ifValue='" + ifValue + '\'' +
-              ", ifKeyState='" + ifKeyState + '\'' +
-              '}';
+      return Objects.toStringHelper("Set").add("key", key)
+          .add("value", value)
+          .add("ifKey", ifKey)
+          .add("ifType", ifType)
+          .add("ifValue",ifValue)
+          .add("ifKeyState", ifKeyState).omitNullValues().toString();
     }
   }
 
@@ -370,26 +390,24 @@ public class ConfigUpgradeChangeDefinition {
      * The keys to keep when the action is {@link TransferOperation#DELETE}.
      */
     @XmlElement(name = "keep-key")
-    public List<String> keepKeys = new ArrayList<String>();
+    public List<String> keepKeys = new ArrayList<>();
 
 
     @Override
     public String toString() {
-      return "Transfer{" +
-              "operation=" + operation +
-              ", fromType='" + fromType + '\'' +
-              ", fromKey='" + fromKey + '\'' +
-              ", toKey='" + toKey + '\'' +
-              ", deleteKey='" + deleteKey + '\'' +
-              ", preserveEdits=" + preserveEdits +
-              ", defaultValue='" + defaultValue + '\'' +
-              ", coerceTo=" + coerceTo +
-              ", ifKey='" + ifKey + '\'' +
-              ", ifType='" + ifType + '\'' +
-              ", ifValue='" + ifValue + '\'' +
-              ", ifKeyState='" + ifKeyState + '\'' +
-              ", keepKeys=" + keepKeys +
-              '}';
+      return Objects.toStringHelper(this).add("operation", operation)
+          .add("fromType", fromType)
+          .add("fromKey", fromKey)
+          .add("toKey", toKey)
+          .add("deleteKey", deleteKey)
+          .add("preserveEdits",preserveEdits)
+          .add("defaultValue", defaultValue)
+          .add("coerceTo", coerceTo)
+          .add("ifKey", ifKey)
+          .add("ifType", ifType)
+          .add("ifValue", ifValue)
+          .add("ifKeyState", ifKeyState)
+          .add("keepKeys", keepKeys).omitNullValues().toString();
     }
   }
 
@@ -420,15 +438,13 @@ public class ConfigUpgradeChangeDefinition {
 
     @Override
     public String toString() {
-      return "Replace{" +
-              "key='" + key + '\'' +
-              ", find='" + find + '\'' +
-              ", replaceWith='" + replaceWith + '\'' +
-              ", ifKey='" + ifKey + '\'' +
-              ", ifType='" + ifType + '\'' +
-              ", ifValue='" + ifValue + '\'' +
-              ", ifKeyState='" + ifKeyState + '\'' +
-              '}';
+      return Objects.toStringHelper(this).add("key", key)
+          .add("find", find)
+          .add("replaceWith", replaceWith)
+          .add("ifKey", ifKey)
+          .add("ifType", ifType)
+          .add("ifValue", ifValue)
+          .add("ifKeyState", ifKeyState).omitNullValues().toString();
     }
   }
 
@@ -459,15 +475,13 @@ public class ConfigUpgradeChangeDefinition {
 
     @Override
     public String toString() {
-      return "RegexReplace{" +
-              "key='" + key + '\'' +
-              ", find='" + find + '\'' +
-              ", replaceWith='" + replaceWith + '\'' +
-              ", ifKey='" + ifKey + '\'' +
-              ", ifType='" + ifType + '\'' +
-              ", ifValue='" + ifValue + '\'' +
-              ", ifKeyState='" + ifKeyState + '\'' +
-              '}';
+      return Objects.toStringHelper(this).add("key", key)
+          .add("find", find)
+          .add("replaceWith",replaceWith)
+          .add("ifKey", ifKey)
+          .add("ifType", ifType)
+          .add("ifValue", ifValue)
+          .add("ifKeyState", ifKeyState).omitNullValues().toString();
     }
 
     /***
@@ -476,15 +490,85 @@ public class ConfigUpgradeChangeDefinition {
      */
     public Replace copyToReplaceObject(){
       Replace rep = new Replace();
-      rep.find = this.find;
-      rep.key = this.key;
-      rep.replaceWith = this.replaceWith;
-      rep.ifKey = this.ifKey;
-      rep.ifType = this.ifType;
-      rep.ifValue = this.ifValue;
-      rep.ifKeyState = this.ifKeyState;
+      rep.find = find;
+      rep.key = key;
+      rep.replaceWith = replaceWith;
+      rep.ifKey = ifKey;
+      rep.ifType = ifType;
+      rep.ifValue = ifValue;
+      rep.ifKeyState = ifKeyState;
 
       return rep;
     }
   }
-}
+
+  /**
+   * Used to insert new content into an existing value, either by prepending
+   * or appending it. If the content specified in
+   * {@link Insert#value} already exists, then it is not inserted again.
+   */
+  @XmlAccessorType(XmlAccessType.FIELD)
+  @XmlType(name = "insert")
+  public static class Insert {
+    /**
+     * The key name
+     */
+    @XmlAttribute(name = "key", required = true)
+    public String key;
+
+    /**
+     * The value to insert.
+     */
+    @XmlAttribute(name = "value", required = true)
+    public String value;
+
+    /**
+     * How the value should be inserted: prepended or appended.
+     */
+    @XmlAttribute(name = "insert-type", required = true)
+    public InsertType insertType = InsertType.APPEND;
+
+    /**
+     * {@code true} to insert a new line before inserting the {@link #value}.
+     */
+    @XmlAttribute(name = "newline-before")
+    public boolean newlineBefore = false;
+
+    /**
+     * {@code true} to insert a new line after inserting the {@link #value}.
+     */
+    @XmlAttribute(name = "newline-after")
+    public boolean newlineAfter = false;
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public String toString() {
+      return Objects.toStringHelper(this).add("insertType", insertType)
+          .add("key", key)
+          .add("value",value)
+          .add("newlineBefore", newlineBefore)
+          .add("newlineAfter", newlineAfter).omitNullValues().toString();
+    }
+  }
+
+  /**
+   * The {@link InsertType} defines how to use the {@link Insert} directive.
+   */
+  @XmlEnum
+  public enum InsertType {
+    /**
+     * Prepend the content.
+     */
+    @XmlEnumValue("prepend")
+    PREPEND,
+
+    /**
+     * Append the content.
+     */
+    @XmlEnumValue("append")
+    APPEND
+  }
+
+}
\ No newline at end of file

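To make the semantics of the new <insert/> directive concrete, here is a minimal sketch of how its fields combine when applied to an existing property value. This is not the actual ConfigureAction implementation; the class and method names (InsertSketch, applyInsert) are hypothetical, and only the Insert fields and the InsertType enum come from the definition above.

    import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Insert;
    import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.InsertType;

    public class InsertSketch {
      // Applies one insertion, mirroring the documented rules: do nothing when
      // the content is already present, honor the newline flags, and prepend
      // or append according to the insert type.
      static String applyInsert(String existing, Insert insert) {
        if (existing == null || existing.contains(insert.value)) {
          return existing; // key missing or value already present: skip
        }

        String content = insert.value;
        if (insert.newlineBefore) {
          content = System.lineSeparator() + content;
        }
        if (insert.newlineAfter) {
          content = content + System.lineSeparator();
        }

        return insert.insertType == InsertType.PREPEND
            ? content + existing
            : existing + content;
      }
    }

With the flags used in the ConfigureActionTest changes further down (a prepend with newline-after="true", an append with no newlines), this sketch produces exactly the expectedPrepend and expectedAppend strings asserted there.
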
http://git-wip-us.apache.org/repos/asf/ambari/blob/410f2943/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigureTask.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigureTask.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigureTask.java
index f256eb0..b7be2ec 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigureTask.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ConfigureTask.java
@@ -30,10 +30,9 @@ import javax.xml.bind.annotation.XmlType;
 
 import org.apache.ambari.server.serveraction.upgrades.ConfigureAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.stack.ConfigUpgradePack;
 import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.ConfigurationKeyValue;
+import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Insert;
 import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Replace;
 import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Transfer;
 import org.apache.commons.lang.StringUtils;
@@ -86,6 +85,12 @@ public class ConfigureTask extends ServerSideActionTask {
    */
   public static final String PARAMETER_REPLACEMENTS = "configure-task-replacements";
 
+  /**
+   * There can be several insertions per task, so they are passed as a
+   * JSON-serialized list of objects.
+   */
+  public static final String PARAMETER_INSERTIONS = "configure-task-insertions";
+
   public static final String actionVerb = "Configuring";
 
   /**
@@ -222,6 +227,12 @@ public class ConfigureTask extends ServerSideActionTask {
       configParameters.put(ConfigureTask.PARAMETER_REPLACEMENTS, m_gson.toJson(allowedReplacements));
     }
 
+    // inserts
+    List<Insert> insertions = definition.getInsertions();
+    if (!insertions.isEmpty()) {
+      configParameters.put(ConfigureTask.PARAMETER_INSERTIONS, m_gson.toJson(insertions));
+    }
+
     return configParameters;
   }
 
@@ -296,32 +307,4 @@ public class ConfigureTask extends ServerSideActionTask {
 
     return isValid;
   }
-
-  /**
-   * Gets the value of the specified cluster property.
-   *
-   * @param cluster
-   *          the cluster (not {@code null}).
-   * @param configType
-   *          the configuration type (ie hdfs-site) (not {@code null}).
-   * @param propertyKey
-   *          the key to retrieve (not {@code null}).
-   * @return the value or {@code null} if it does not exist.
-   */
-  private String getDesiredConfigurationValue(Cluster cluster,
-      String configType, String propertyKey) {
-
-    Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
-    DesiredConfig desiredConfig = desiredConfigs.get(configType);
-    if (null == desiredConfig) {
-      return null;
-    }
-
-    Config config = cluster.getConfig(configType, desiredConfig.getTag());
-    if (null == config) {
-      return null;
-    }
-
-    return config.getProperties().get(propertyKey);
-  }
-}
+}
\ No newline at end of file

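Since the insertions are shipped as JSON under PARAMETER_INSERTIONS, here is a rough sketch of the round trip, assuming standard Gson defaults on both ends. The class and method names (InsertionParamSketch, toParameter, fromParameter) are hypothetical; only PARAMETER_INSERTIONS and the Insert type come from the patch.

    import java.lang.reflect.Type;
    import java.util.List;

    import com.google.gson.Gson;
    import com.google.gson.reflect.TypeToken;

    import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Insert;

    public class InsertionParamSketch {
      private static final Gson GSON = new Gson();

      // Mirrors how ConfigureTask puts the list into the command parameters.
      // With Gson defaults each entry serializes roughly as:
      // {"key":"...","value":"...","insertType":"APPEND",
      //  "newlineBefore":false,"newlineAfter":false}
      static String toParameter(List<Insert> insertions) {
        return GSON.toJson(insertions);
      }

      // How a server-side action could recover the typed list from the parameter.
      static List<Insert> fromParameter(String json) {
        Type listType = new TypeToken<List<Insert>>() {}.getType();
        return GSON.fromJson(json, listType);
      }
    }

Packing the directives into a single JSON-valued command parameter keeps the execution command flat while still letting the server-side action recover fully typed Insert objects.
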
http://git-wip-us.apache.org/repos/asf/ambari/blob/410f2943/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index d5dec43..73e3c38 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -256,5 +256,32 @@
     </component>
     </service>
 
+    <service name="HIVE">
+      <component name="HIVE_SERVER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_hive_append_heap_dump_options" summary="Appending optional Java heap dump parameters" >
+            <type>hive-env</type>
+            <insert key="content" value="export HADOOP_CLIENT_OPTS=&quot;$HADOOP_CLIENT_OPTS{{heap_dump_opts}}&quot;" insert-type="append" newline-before="true" newline-after="true" />
+          </definition>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_tez_append_heap_dump_options">
+            <type>tez-site</type>
+            <insert key="tez.task.launch.cmd-opts" value="{{heap_dump_opts}}" insert-type="append" newline-before="false" newline-after="false" />
+          </definition>
+        </changes>
+      </component>
+      <component name="HIVE_SERVER_INTERACTIVE">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_hive_llap_append_heap_dump_options" summary="Appending optional Java heap dump parameters" >
+            <type>hive-interactive-env</type>
+            <insert key="content" value="export HADOOP_CLIENT_OPTS=&quot;$HADOOP_CLIENT_OPTS{{heap_dump_opts}}&quot;" insert-type="append" newline-before="true" newline-after="true" />
+          </definition>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_hive_llap_append_java_heap_dump_options">
+            <type>hive-interactive-env</type>
+            <insert key="llap_java_opts" value="{{heap_dump_opts}}" insert-type="append" newline-before="false" newline-after="false" />
+          </definition>
+        </changes>
+      </component>
+    </service>
+
   </services>
 </upgrade-config-changes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/410f2943/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
index 5ef959b..6c4da04 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
@@ -269,8 +269,7 @@
     <!-- After processing this group, will change the effective Stack of the UpgradeContext object. -->
     <group xsi:type="update-stack" name="UPDATE_DESIRED_STACK_ID" title="Update Target Stack">
       <execute-stage title="Update Target Stack">
-        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.UpdateDesiredStackAction">
-        </task>
+        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.UpdateDesiredStackAction"/>
       </execute-stage>
     </group>
 
@@ -402,6 +401,22 @@
       <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus">
         <task xsi:type="configure" id="increase_storm_zookeeper_timeouts"/>
       </execute-stage>
+      
+      <execute-stage service="HIVE" component="HIVE_SERVER" title="Appending heap dump options for Hive">
+        <task xsi:type="configure" id="hdp_2_6_0_0_hive_append_heap_dump_options"/>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_SERVER" title="Appending heap dump options for Tez">
+        <task xsi:type="configure" id="hdp_2_6_0_0_tez_append_heap_dump_options"/>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_SERVER_INTERACTIVE" title="Appending heap dump options for HiveSever2 Interactive">
+        <task xsi:type="configure" id="hdp_2_6_0_0_hive_llap_append_heap_dump_options"/>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_SERVER_INTERACTIVE" title="Appending Java heap dump options for HiveSever2 Interactive">
+        <task xsi:type="configure" id="hdp_2_6_0_0_hive_llap_append_java_heap_dump_options"/>
+      </execute-stage>      
     </group>
 
     <!--

http://git-wip-us.apache.org/repos/asf/ambari/blob/410f2943/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index b13a6f0..7f9e986 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -764,12 +764,26 @@
       </component>
 
       <component name="HIVE_SERVER">
+        <pre-upgrade>
+          <task xsi:type="configure" id="hdp_2_6_0_0_tez_append_heap_dump_options"/>
+          <task xsi:type="configure" id="hdp_2_6_0_0_hive_append_heap_dump_options"/>
+        </pre-upgrade>
+        
+        <pre-downgrade />
+        
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
       </component>
 
       <component name="HIVE_SERVER_INTERACTIVE">
+        <pre-upgrade>
+          <task xsi:type="configure" id="hdp_2_6_0_0_hive_llap_append_java_heap_dump_options"/>
+          <task xsi:type="configure" id="hdp_2_6_0_0_hive_llap_append_heap_dump_options"/>
+        </pre-upgrade>
+        
+        <pre-downgrade />
+      
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>

http://git-wip-us.apache.org/repos/asf/ambari/blob/410f2943/ambari-server/src/main/resources/upgrade-config.xsd
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade-config.xsd b/ambari-server/src/main/resources/upgrade-config.xsd
index 805b472..cee7c85 100644
--- a/ambari-server/src/main/resources/upgrade-config.xsd
+++ b/ambari-server/src/main/resources/upgrade-config.xsd
@@ -43,6 +43,13 @@
       <xs:enumeration value="yaml-array"/>
     </xs:restriction>
   </xs:simpleType>
+
+  <xs:simpleType name="insertion-type">
+    <xs:restriction base="xs:string">
+      <xs:enumeration value="prepend"/>
+      <xs:enumeration value="append"/>
+    </xs:restriction>
+  </xs:simpleType>
   
   <xs:complexType name="configure">
     <xs:sequence>
@@ -103,6 +110,15 @@
             <xs:attribute name="mask" use="optional" type="xs:boolean"/>
           </xs:complexType>
         </xs:element>
+        <xs:element name="insert" minOccurs="0" maxOccurs="unbounded">
+          <xs:complexType>
+            <xs:attribute name="key" use="required" type="xs:string"/>
+            <xs:attribute name="value" use="required" type="xs:string"/>
+            <xs:attribute name="insert-type" use="required" type="insertion-type"/>
+            <xs:attribute name="newline-before" use="optional" type="xs:boolean"/>
+            <xs:attribute name="newline-after" use="optional" type="xs:boolean"/>            
+          </xs:complexType>
+        </xs:element>
       </xs:choice>
     </xs:sequence>
     <xs:attribute name="id" use="required" type="xs:string"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/410f2943/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
index 92fa084..cd8327b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
@@ -38,7 +38,6 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.OrmTestHelper;
-import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
 import org.apache.ambari.server.orm.dao.HostDAO;
 import org.apache.ambari.server.orm.dao.HostVersionDAO;
 import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
@@ -57,6 +56,8 @@ import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceFactory;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.ConfigurationKeyValue;
+import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Insert;
+import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.InsertType;
 import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Replace;
 import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Transfer;
 import org.apache.ambari.server.state.stack.upgrade.ConfigureTask;
@@ -100,8 +101,6 @@ public class ConfigureActionTest {
   @Inject
   private Clusters clusters;
   @Inject
-  private ClusterVersionDAO clusterVersionDAO;
-  @Inject
   private ConfigFactory cf;
   @Inject
   private ConfigureAction action;
@@ -139,13 +138,13 @@ public class ConfigureActionTest {
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
-    List<ConfigurationKeyValue> configurations = new ArrayList<ConfigurationKeyValue>();
+    List<ConfigurationKeyValue> configurations = new ArrayList<>();
     ConfigurationKeyValue keyValue = new ConfigurationKeyValue();
     configurations.add(keyValue);
     keyValue.key = "initLimit";
     keyValue.value = "11";
 
-    Map<String, String> commandParams = new HashMap<String, String>();
+    Map<String, String> commandParams = new HashMap<>();
     commandParams.put("upgrade_direction", "upgrade");
     commandParams.put("version", HDP_2_2_0_1);
     commandParams.put("clusterName", "c1");
@@ -203,7 +202,7 @@ public class ConfigureActionTest {
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
-    Map<String, String> commandParams = new HashMap<String, String>();
+    Map<String, String> commandParams = new HashMap<>();
     commandParams.put("upgrade_direction", "upgrade");
     commandParams.put("version", HDP_2_2_0_1);
     commandParams.put("clusterName", "c1");
@@ -272,7 +271,7 @@ public class ConfigureActionTest {
     keyValue.key = "initLimit";
     keyValue.value = "11";
 
-    Map<String, String> commandParams = new HashMap<String, String>();
+    Map<String, String> commandParams = new HashMap<>();
     commandParams.put("upgrade_direction", "upgrade");
     commandParams.put("version", HDP_2_2_0_1);
     commandParams.put("clusterName", "c1");
@@ -402,14 +401,14 @@ public class ConfigureActionTest {
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
-    Map<String, String> commandParams = new HashMap<String, String>();
+    Map<String, String> commandParams = new HashMap<>();
     commandParams.put("upgrade_direction", "upgrade");
     commandParams.put("version", HDP_2_2_0_1);
     commandParams.put("clusterName", "c1");
     commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
 
     // copy with coerce
-    List<Transfer> transfers = new ArrayList<Transfer>();
+    List<Transfer> transfers = new ArrayList<>();
     Transfer transfer = new Transfer();
     transfer.operation = TransferOperation.COPY;
     transfer.coerceTo = TransferCoercionType.YAML_ARRAY;
@@ -466,14 +465,14 @@ public class ConfigureActionTest {
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
-    Map<String, String> commandParams = new HashMap<String, String>();
+    Map<String, String> commandParams = new HashMap<>();
     commandParams.put("upgrade_direction", "upgrade");
     commandParams.put("version", HDP_2_2_0_1);
     commandParams.put("clusterName", "c1");
     commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
 
     // Replacement task
-    List<Replace> replacements = new ArrayList<Replace>();
+    List<Replace> replacements = new ArrayList<>();
     Replace replace = new Replace();
     replace.key = "key_to_replace";
     replace.find = "New Cat";
@@ -538,14 +537,14 @@ public class ConfigureActionTest {
     c.addDesiredConfig("user", Collections.singleton(config));
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
-    Map<String, String> commandParams = new HashMap<String, String>();
+    Map<String, String> commandParams = new HashMap<>();
     commandParams.put("upgrade_direction", "upgrade");
     commandParams.put("version", HDP_2_2_0_1);
     commandParams.put("clusterName", "c1");
     commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
 
     // Replacement task
-    List<Replace> replacements = new ArrayList<Replace>();
+    List<Replace> replacements = new ArrayList<>();
     Replace replace = new Replace();
     replace.key = "missing";
     replace.find = "foo";
@@ -596,7 +595,7 @@ public class ConfigureActionTest {
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
     // create several configurations
-    List<ConfigurationKeyValue> configurations = new ArrayList<ConfigurationKeyValue>();
+    List<ConfigurationKeyValue> configurations = new ArrayList<>();
     ConfigurationKeyValue fooKey2 = new ConfigurationKeyValue();
     configurations.add(fooKey2);
     fooKey2.key = "fooKey2";
@@ -608,7 +607,7 @@ public class ConfigureActionTest {
     fooKey3.value = "barValue3";
     fooKey3.mask = true;
 
-    Map<String, String> commandParams = new HashMap<String, String>();
+    Map<String, String> commandParams = new HashMap<>();
     commandParams.put("upgrade_direction", "upgrade");
     commandParams.put("version", HDP_2_2_0_1);
     commandParams.put("clusterName", "c1");
@@ -662,7 +661,7 @@ public class ConfigureActionTest {
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
     // create several configurations
-    List<ConfigurationKeyValue> configurations = new ArrayList<ConfigurationKeyValue>();
+    List<ConfigurationKeyValue> configurations = new ArrayList<>();
     ConfigurationKeyValue fooKey1 = new ConfigurationKeyValue();
     configurations.add(fooKey1);
     fooKey1.key = "fooKey1";
@@ -698,7 +697,7 @@ public class ConfigureActionTest {
     fooKey5.ifKeyState= PropertyKeyState.ABSENT;
 
 
-    Map<String, String> commandParams = new HashMap<String, String>();
+    Map<String, String> commandParams = new HashMap<>();
     commandParams.put("upgrade_direction", "upgrade");
     commandParams.put("version", HDP_2_2_0_1);
     commandParams.put("clusterName", "c1");
@@ -756,7 +755,7 @@ public class ConfigureActionTest {
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
     // create several configurations
-    List<ConfigurationKeyValue> configurations = new ArrayList<ConfigurationKeyValue>();
+    List<ConfigurationKeyValue> configurations = new ArrayList<>();
     ConfigurationKeyValue fooKey3 = new ConfigurationKeyValue();
     configurations.add(fooKey3);
     fooKey3.key = "fooKey3";
@@ -782,7 +781,7 @@ public class ConfigureActionTest {
     fooKey5.ifKeyState= PropertyKeyState.PRESENT;
 
 
-    Map<String, String> commandParams = new HashMap<String, String>();
+    Map<String, String> commandParams = new HashMap<>();
     commandParams.put("upgrade_direction", "upgrade");
     commandParams.put("version", HDP_2_2_0_1);
     commandParams.put("clusterName", "c1");
@@ -839,7 +838,7 @@ public class ConfigureActionTest {
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
     // create several configurations
-    List<Replace> replacements = new ArrayList<Replace>();
+    List<Replace> replacements = new ArrayList<>();
     Replace replace = new Replace();
     replace.key = "replace.key.3";
     replace.find = "a";
@@ -876,7 +875,7 @@ public class ConfigureActionTest {
     replace4.ifKeyState = PropertyKeyState.ABSENT;
     replacements.add(replace4);
 
-    Map<String, String> commandParams = new HashMap<String, String>();
+    Map<String, String> commandParams = new HashMap<>();
     commandParams.put("upgrade_direction", "upgrade");
     commandParams.put("version", HDP_2_2_0_1);
     commandParams.put("clusterName", "c1");
@@ -931,7 +930,7 @@ public class ConfigureActionTest {
     assertEquals(2, c.getConfigsByType("zoo.cfg").size());
 
     // create several configurations
-    List<Replace> replacements = new ArrayList<Replace>();
+    List<Replace> replacements = new ArrayList<>();
 
     Replace replace2 = new Replace();
     replacements.add(replace2);
@@ -963,7 +962,7 @@ public class ConfigureActionTest {
     replace4.ifKeyState = PropertyKeyState.PRESENT;
     replacements.add(replace4);
 
-    Map<String, String> commandParams = new HashMap<String, String>();
+    Map<String, String> commandParams = new HashMap<>();
     commandParams.put("upgrade_direction", "upgrade");
     commandParams.put("version", HDP_2_2_0_1);
     commandParams.put("clusterName", "c1");
@@ -1018,7 +1017,7 @@ public class ConfigureActionTest {
     keyValue.key = "initLimit";
     keyValue.value = "11";
 
-    Map<String, String> commandParams = new HashMap<String, String>();
+    Map<String, String> commandParams = new HashMap<>();
     commandParams.put("upgrade_direction", "upgrade");
     commandParams.put("version", HDP_2_2_0_1);
     commandParams.put("clusterName", "c1");
@@ -1131,7 +1130,7 @@ public class ConfigureActionTest {
     keyValue.key = "initLimit";
     keyValue.value = "11";
 
-    Map<String, String> commandParams = new HashMap<String, String>();
+    Map<String, String> commandParams = new HashMap<>();
     commandParams.put("upgrade_direction", "upgrade");
     commandParams.put("version", HDP_2_2_0_1);
     commandParams.put("clusterName", "c1");
@@ -1226,7 +1225,7 @@ public class ConfigureActionTest {
     keyValue.key = "initLimit";
     keyValue.value = "11";
 
-    Map<String, String> commandParams = new HashMap<String, String>();
+    Map<String, String> commandParams = new HashMap<>();
     commandParams.put("upgrade_direction", "upgrade");
     commandParams.put("version", HDP_2_2_0_1);
     commandParams.put("clusterName", "c1");
@@ -1333,7 +1332,7 @@ public class ConfigureActionTest {
     keyValue.key = "initLimit";
     keyValue.value = "11";
 
-    Map<String, String> commandParams = new HashMap<String, String>();
+    Map<String, String> commandParams = new HashMap<>();
     commandParams.put("upgrade_direction", "upgrade");
     commandParams.put("version", HDP_2_2_0_1);
     commandParams.put("clusterName", "c1");
@@ -1434,7 +1433,7 @@ public class ConfigureActionTest {
     keyValue.key = "initLimit";
     keyValue.value = "11";
 
-    Map<String, String> commandParams = new HashMap<String, String>();
+    Map<String, String> commandParams = new HashMap<>();
     commandParams.put("upgrade_direction", "upgrade");
     commandParams.put("version", HDP_2_2_0_1);
     commandParams.put("clusterName", "c1");
@@ -1532,7 +1531,7 @@ public class ConfigureActionTest {
     keyValue.key = "initLimit";
     keyValue.value = "11";
 
-    Map<String, String> commandParams = new HashMap<String, String>();
+    Map<String, String> commandParams = new HashMap<>();
     commandParams.put("upgrade_direction", "upgrade");
     commandParams.put("version", HDP_2_2_0_1);
     commandParams.put("clusterName", "c1");
@@ -1600,6 +1599,94 @@ public class ConfigureActionTest {
     }
   }
 
+  /**
+   * Tests using the {@code <insert/>} element in a configuration upgrade pack.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testInsert() throws Exception {
+    makeUpgradeCluster();
+
+    Cluster c = clusters.getCluster("c1");
+    assertEquals(1, c.getConfigsByType("zoo.cfg").size());
+
+    c.setDesiredStackVersion(HDP_220_STACK);
+    Config config = cf.createNew(c, "zoo.cfg", "version2", new HashMap<String, String>() {
+      {
+        put("key_to_append", "append");
+        put("key_to_prepend", "prepend");
+      }
+    }, new HashMap<String, Map<String, String>>());
+
+    c.addDesiredConfig("user", Collections.singleton(config));
+    assertEquals(2, c.getConfigsByType("zoo.cfg").size());
+
+    Map<String, String> commandParams = new HashMap<>();
+    commandParams.put("upgrade_direction", "upgrade");
+    commandParams.put("version", HDP_2_2_0_1);
+    commandParams.put("clusterName", "c1");
+    commandParams.put(ConfigureTask.PARAMETER_CONFIG_TYPE, "zoo.cfg");
+
+    // define the changes
+    final String prependValue = "This should be on a newline";
+    final String appendValue = " this will be after...";
+
+    // insert tasks
+    List<Insert> insertions = new ArrayList<>();
+
+    Insert prepend = new Insert();
+    prepend.insertType = InsertType.PREPEND;
+    prepend.key = "key_to_prepend";
+    prepend.value = prependValue;
+    prepend.newlineBefore = false;
+    prepend.newlineAfter = true;
+
+    Insert append = new Insert();
+    append.insertType = InsertType.APPEND;
+    append.key = "key_to_append";
+    append.value = appendValue;
+    append.newlineBefore = false;
+    append.newlineAfter = false;
+
+    // add them to the list
+    insertions.add(prepend);
+    insertions.add(append);
+
+    // add them again to verify that the insertions are idempotent
+    insertions.add(prepend);
+    insertions.add(append);
+
+    commandParams.put(ConfigureTask.PARAMETER_INSERTIONS, new Gson().toJson(insertions));
+
+    ExecutionCommand executionCommand = new ExecutionCommand();
+    executionCommand.setCommandParams(commandParams);
+    executionCommand.setClusterName("c1");
+    executionCommand.setRoleParams(new HashMap<String, String>());
+    executionCommand.getRoleParams().put(ServerAction.ACTION_USER_NAME, "username");
+
+    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
+    hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
+    action.setExecutionCommand(executionCommand);
+    action.setHostRoleCommand(hostRoleCommand);
+
+    CommandReport report = action.execute(null);
+    assertNotNull(report);
+
+    assertEquals(3, c.getConfigsByType("zoo.cfg").size());
+
+    config = c.getDesiredConfigByType("zoo.cfg");
+    assertNotNull(config);
+    assertFalse("version2".equals(config.getTag()));
+
+    // build the expected values
+    String expectedPrepend = prependValue + System.lineSeparator() + "prepend";
+    String expectedAppend = "append" + appendValue;
+
+    assertEquals(expectedPrepend, config.getProperties().get("key_to_prepend"));
+    assertEquals(expectedAppend, config.getProperties().get("key_to_append"));
+  }
+
   private void makeUpgradeCluster() throws Exception {
     String clusterName = "c1";
     String hostName = "h1";
@@ -1632,7 +1719,7 @@ public class ConfigureActionTest {
 
     Host host = clusters.getHost(hostName);
 
-    Map<String, String> hostAttributes = new HashMap<String, String>();
+    Map<String, String> hostAttributes = new HashMap<>();
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "6");
     host.setHostAttributes(hostAttributes);


[32/50] [abbrv] ambari git commit: AMBARI-19598. (1). Set the calculated value for config 'num_llap_nodes', and (2). Remove 'Tech Preview' string for HSI from HDP 2.6 onwards.

Posted by nc...@apache.org.
AMBARI-19598. (1). Set the calculated value for config 'num_llap_nodes', and (2). Remove 'Tech Preview' string for HSI from HDP 2.6 onwards.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b8ef3ad1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b8ef3ad1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b8ef3ad1

Branch: refs/heads/branch-dev-patch-upgrade
Commit: b8ef3ad1bb65b64feeea1d21491b1cd6506f20c1
Parents: c916dda
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Tue Jan 17 12:53:08 2017 -0800
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Tue Jan 17 13:30:51 2017 -0800

----------------------------------------------------------------------
 .../stacks/HDP/2.5/services/stack_advisor.py    |  1 +
 .../HIVE/configuration/hive-interactive-env.xml | 22 ++++++++++
 .../stacks/2.5/common/test_stack_advisor.py     | 45 ++++++--------------
 3 files changed, 35 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b8ef3ad1/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index afe9fea..04ada3e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -1123,6 +1123,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
       Logger.info("User requested num_llap_nodes : {0}, but used/adjusted value for calculations is : {1}".format(num_llap_nodes_requested, num_llap_nodes))
     else:
       Logger.info("Used num_llap_nodes for calculations : {0}".format(num_llap_nodes_requested))
+    putHiveInteractiveEnvProperty('num_llap_nodes', num_llap_nodes)
 
     llap_container_size = long(llap_daemon_mem_per_node)
     putHiveInteractiveSiteProperty('hive.llap.daemon.yarn.container.mb', llap_container_size)

http://git-wip-us.apache.org/repos/asf/ambari/blob/b8ef3ad1/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
index a8c2415..6a68335 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
@@ -21,6 +21,28 @@
 -->
 <configuration supports_adding_forbidden="true">
   <property>
+    <name>enable_hive_interactive</name>
+    <value>false</value>
+    <description>Enable or disable interactive query in this cluster.</description>
+    <display-name>Enable Interactive Query</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <overridable>false</overridable>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Yes</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>No</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
     <name>llap_java_opts</name>
     <value>-XX:+AlwaysPreTouch {% if java_version > 7 %}-XX:+UseG1GC -XX:TLABSize=8m -XX:+ResizeTLAB -XX:+UseNUMA -XX:+AggressiveOpts -XX:MetaspaceSize=1024m -XX:InitiatingHeapOccupancyPercent=80 -XX:MaxGCPauseMillis=200{% else %}-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC{% endif %}{{heap_dump_opts}}</value>
     <description>Java opts for llap application</description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b8ef3ad1/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
index d4a301c..790c6a7 100644
--- a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
@@ -1165,7 +1165,7 @@ class TestHDP25StackAdvisor(TestCase):
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.server2.tez.sessions.per.default.queue'], '1')
     self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'minimum': '1', 'maximum': '1'})
 
-    self.assertTrue('num_llap_nodes' not in configurations['hive-interactive-env']['properties'])
+    self.assertEquals(configurations['hive-interactive-env']['properties']['num_llap_nodes'], '3')
 
 
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '9216')
@@ -1364,9 +1364,7 @@ class TestHDP25StackAdvisor(TestCase):
 
     self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '1'})
 
-    self.assertTrue('num_llap_nodes' not in configurations['hive-interactive-env']['properties'])
-
-
+    self.assertEquals(configurations['hive-interactive-env']['properties']['num_llap_nodes'], '3')
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '9548')
 
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '1')
@@ -1559,9 +1557,7 @@ class TestHDP25StackAdvisor(TestCase):
 
     self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '1'})
 
-    self.assertTrue('num_llap_nodes' not in configurations['hive-interactive-env']['properties'])
-
-
+    self.assertEquals(configurations['hive-interactive-env']['properties']['num_llap_nodes'], '3')
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '48128')
 
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '1')
@@ -1762,9 +1758,7 @@ class TestHDP25StackAdvisor(TestCase):
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.server2.tez.sessions.per.default.queue'], '1')
     self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '1.0', 'minimum': '1'})
 
-    self.assertTrue('num_llap_nodes' not in configurations['hive-interactive-env']['properties'])
-
-
+    self.assertEquals(configurations['hive-interactive-env']['properties']['num_llap_nodes'], '3')
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '38912')
 
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '4')
@@ -1961,9 +1955,8 @@ class TestHDP25StackAdvisor(TestCase):
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.server2.tez.sessions.per.default.queue'], '1')
     self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'minimum': '1', 'maximum': '1'})
     self.assertEqual(configurations['capacity-scheduler']['properties'], {'capacity-scheduler': 'yarn.scheduler.capacity.root.accessible-node-labels=*\nyarn.scheduler.capacity.maximum-am-resource-percent=1\nyarn.scheduler.capacity.node-locality-delay=40\nyarn.scheduler.capacity.root.capacity=100\nyarn.scheduler.capacity.root.default.state=RUNNING\nyarn.scheduler.capacity.root.default.maximum-capacity=0.0\nyarn.scheduler.capacity.root.queues=default,llap\nyarn.scheduler.capacity.maximum-applications=10000\nyarn.scheduler.capacity.root.default.user-limit-factor=1\nyarn.scheduler.capacity.root.acl_administer_queue=*\nyarn.scheduler.capacity.root.default.acl_submit_applications=*\nyarn.scheduler.capacity.root.default.capacity=0.0\nyarn.scheduler.capacity.queue-mappings-override.enable=false\nyarn.scheduler.capacity.root.llap.user-limit-factor=1\nyarn.scheduler.capacity.root.llap.state=RUNNING\nyarn.scheduler.capacity.root.llap.ordering-policy=fifo\nyarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\nyarn.scheduler.capacity.root.llap.maximum-capacity=100.0\nyarn.scheduler.capacity.root.llap.capacity=100.0\nyarn.scheduler.capacity.root.llap.acl_submit_applications=hive\nyarn.scheduler.capacity.root.llap.acl_administer_queue=hive\nyarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'})
-    self.assertTrue('num_llap_nodes' not in configurations['hive-interactive-env']['properties'])
-
 
+    self.assertEquals(configurations['hive-interactive-env']['properties']['num_llap_nodes'], '3')
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '11594')
 
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '3')
@@ -2159,9 +2152,7 @@ class TestHDP25StackAdvisor(TestCase):
     self.assertEqual(configurations['capacity-scheduler']['properties'], {'capacity-scheduler': 'yarn.scheduler.capacity.root.accessible-node-labels=*\nyarn.scheduler.capacity.maximum-am-resource-percent=1\nyarn.scheduler.capacity.node-locality-delay=40\nyarn.scheduler.capacity.root.capacity=100\nyarn.scheduler.capacity.root.default.state=RUNNING\nyarn.scheduler.capacity.root.default.maximum-capacity=0.0\nyarn.scheduler.capacity.root.queues=default,llap\nyarn.scheduler.capacity.maximum-applications=10000\nyarn.scheduler.capacity.root.default.user-limit-factor=1\nyarn.scheduler.capacity.root.acl_administer_queue=*\nyarn.scheduler.capacity.root.default.acl_submit_applications=*\nyarn.scheduler.capacity.root.default.capacity=0.0\nyarn.scheduler.capacity.queue-mappings-override.enable=false\nyarn.scheduler.capacity.root.llap.user-limit-factor=1\nyarn.scheduler.capacity.root.llap.state=RUNNING\nyarn.scheduler.capacity.root.llap.ordering-policy=fifo\nyarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\nyarn.scheduler.capacity.root.llap.maximum-capacity=100.0\nyarn.scheduler.capacity.root.llap.capacity=100.0\nyarn.scheduler.capacity.root.llap.acl_submit_applications=hive\nyarn.scheduler.capacity.root.llap.acl_administer_queue=hive\nyarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'})
     self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '1'})
 
-    self.assertTrue('num_llap_nodes' not in configurations['hive-interactive-env']['properties'])
-
-
+    self.assertEquals(configurations['hive-interactive-env']['properties']['num_llap_nodes'], '3')
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '202752')
 
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '3')
@@ -2364,9 +2355,7 @@ class TestHDP25StackAdvisor(TestCase):
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.server2.tez.sessions.per.default.queue'], '1.0')
     self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '2.0', 'minimum': '1'})
 
-    self.assertTrue('num_llap_nodes' not in configurations['hive-interactive-env']['properties'])
-
-
+    self.assertEquals(configurations['hive-interactive-env']['properties']['num_llap_nodes'], '5')
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '36864')
 
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '4')
@@ -2563,9 +2552,7 @@ class TestHDP25StackAdvisor(TestCase):
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.server2.tez.sessions.per.default.queue'], '2.0')
     self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '5.0', 'minimum': '1'})
 
-    self.assertTrue('num_llap_nodes' not in configurations['hive-interactive-env']['properties'])
-
-
+    self.assertEquals(configurations['hive-interactive-env']['properties']['num_llap_nodes'], '3')
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '204259')
 
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '10')
@@ -2758,9 +2745,7 @@ class TestHDP25StackAdvisor(TestCase):
     self.assertEqual(configurations['capacity-scheduler']['properties'], {'capacity-scheduler': 'yarn.scheduler.capacity.root.accessible-node-labels=*\nyarn.scheduler.capacity.maximum-am-resource-percent=1\nyarn.scheduler.capacity.node-locality-delay=40\nyarn.scheduler.capacity.root.capacity=100\nyarn.scheduler.capacity.root.default.state=RUNNING\nyarn.scheduler.capacity.root.default.maximum-capacity=0.0\nyarn.scheduler.capacity.root.queues=default,llap\nyarn.scheduler.capacity.maximum-applications=10000\nyarn.scheduler.capacity.root.default.user-limit-factor=1\nyarn.scheduler.capacity.root.acl_administer_queue=*\nyarn.scheduler.capacity.root.default.acl_submit_applications=*\nyarn.scheduler.capacity.root.default.capacity=0.0\nyarn.scheduler.capacity.queue-mappings-override.enable=false\nyarn.scheduler.capacity.root.llap.user-limit-factor=1\nyarn.scheduler.capacity.root.llap.state=RUNNING\nyarn.scheduler.capacity.root.llap.ordering-policy=fifo\nyarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\nyarn.scheduler.capacity.root.llap.maximum-capacity=100.0\nyarn.scheduler.capacity.root.llap.capacity=100.0\nyarn.scheduler.capacity.root.llap.acl_submit_applications=hive\nyarn.scheduler.capacity.root.llap.acl_administer_queue=hive\nyarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'})
     self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '1.0'})
 
-    self.assertTrue('num_llap_nodes' not in configurations['hive-interactive-env']['properties'])
-
-
+    self.assertEquals(configurations['hive-interactive-env']['properties']['num_llap_nodes'], '3')
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '202752')
 
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '3')
@@ -2951,9 +2936,7 @@ class TestHDP25StackAdvisor(TestCase):
     self.assertTrue('capacity-scheduler' not in configurations)
     self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '1.0'})
 
-    self.assertTrue('num_llap_nodes' not in configurations['hive-interactive-env']['properties'])
-
-
+    self.assertEquals(configurations['hive-interactive-env']['properties']['num_llap_nodes'], '3')
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '202752')
 
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '3')
@@ -3175,9 +3158,7 @@ class TestHDP25StackAdvisor(TestCase):
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.server2.tez.sessions.per.default.queue'], '1')
     self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '1.0', 'minimum': '1'})
 
-    self.assertTrue('num_llap_nodes' not in configurations['hive-interactive-env']['properties'])
-
-
+    self.assertEquals(configurations['hive-interactive-env']['properties']['num_llap_nodes'], '3')
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '9728')
 
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '3')
@@ -3777,9 +3758,7 @@ class TestHDP25StackAdvisor(TestCase):
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.server2.tez.sessions.per.default.queue'], '1')
     self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '1', 'minimum': '1'})
 
-    self.assertTrue('num_llap_nodes' not in configurations['hive-interactive-env']['properties'])
-
-
+    self.assertEquals(configurations['hive-interactive-env']['properties']['num_llap_nodes'], '1')
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '200704')
 
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '3')


[10/50] [abbrv] ambari git commit: AMBARI-19566. Hive View 2.0: Export Query Result (pallavkul)

Posted by nc...@apache.org.
AMBARI-19566. Hive View 2.0: Export Query Result (pallavkul)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8b22dd01
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8b22dd01
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8b22dd01

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 8b22dd01811759bef9a325773fa9c95862bac57c
Parents: c326ce4
Author: pallavkul <pa...@gmail.com>
Authored: Tue Jan 17 16:20:02 2017 +0530
Committer: pallavkul <pa...@gmail.com>
Committed: Tue Jan 17 16:20:02 2017 +0530

----------------------------------------------------------------------
 .../src/main/resources/ui/app/adapters/job.js   |  7 ++-
 .../ui/app/components/export-result.js          | 54 ++++++++++++++++++++
 .../ui/app/components/query-result-table.js     | 34 ++++++++++--
 .../resources/ui/app/components/save-hdfs.js    | 52 -------------------
 .../resources/ui/app/routes/queries/query.js    | 32 ++++++++++--
 .../src/main/resources/ui/app/services/query.js |  8 +++
 .../app/templates/components/export-result.hbs  | 46 +++++++++++++++++
 .../templates/components/query-result-table.hbs | 20 +++++++-
 .../ui/app/templates/components/save-hdfs.hbs   | 37 --------------
 .../ui/app/templates/queries/query.hbs          | 14 ++---
 10 files changed, 194 insertions(+), 110 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8b22dd01/contrib/views/hive20/src/main/resources/ui/app/adapters/job.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/adapters/job.js b/contrib/views/hive20/src/main/resources/ui/app/adapters/job.js
index e2f342e..9fc73a4 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/adapters/job.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/adapters/job.js
@@ -30,8 +30,13 @@ export default ApplicationAdapter.extend({
   },
 
   saveToHDFS(jobId, path){
-    let resultUrl = this.urlForFindRecord(jobId, 'job').replace('/resources','')  + "/results/csv/saveToHDFS?commence=true&file=" + path + ".csv";
+    let resultUrl = this.urlForFindRecord(jobId, 'job') + "/results/csv/saveToHDFS?commence=true&file=" + path + ".csv";
     return this.ajax(resultUrl, 'GET');
+  },
+
+  downloadAsCsv(jobId, path){
+    let resultUrl = this.urlForFindRecord(jobId, 'job') + "/results/csv/?fileName=" + path + ".csv";
+    return resultUrl;
   }
 
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b22dd01/contrib/views/hive20/src/main/resources/ui/app/components/export-result.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/components/export-result.js b/contrib/views/hive20/src/main/resources/ui/app/components/export-result.js
new file mode 100644
index 0000000..517da1e
--- /dev/null
+++ b/contrib/views/hive20/src/main/resources/ui/app/components/export-result.js
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Component.extend({
+
+  label: null,
+  confirmText: 'Confirm',
+  rejectText: 'Reject',
+  isExportResultSuccessMessege:false,
+  isExportResultFailureMessege:false,
+
+  jobId: 0,
+
+  pathName: Ember.computed('jobId', function() {
+    return 'Worksheet_' + this.get('jobId');
+  }),
+
+  labelIcon: null,
+  confirmIcon: null,
+  rejectIcon: null,
+
+  closable: true,
+  titleClass: 'primary',
+  confirmClass: 'primary',
+  rejectClass: 'default',
+
+  actions: {
+    confirm() {
+      let pathName = $("input.path-name").val();
+      this.sendAction('confirm', this.get('jobId'), pathName);
+    },
+
+    reject() {
+      this.sendAction('reject');
+    }
+  }
+});

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b22dd01/contrib/views/hive20/src/main/resources/ui/app/components/query-result-table.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/components/query-result-table.js b/contrib/views/hive20/src/main/resources/ui/app/components/query-result-table.js
index 429dfb3..919127f 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/components/query-result-table.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/components/query-result-table.js
@@ -67,7 +67,12 @@ export default Ember.Component.extend({
 
   showSaveHdfsModal:false,
 
-  showDownloadCsvModal:false,
+  showDownloadCsvModal: false,
+
+  isExportResultSuccessMessege:false,
+
+  isSaveHdfsErrorMessege:false,
+
 
   actions: {
     onScrolledToBottom() {
@@ -88,20 +93,39 @@ export default Ember.Component.extend({
     },
 
     openSaveHdfsModal(){
-      this.set('showSaveHdfsModal',true)
+      this.set('showSaveHdfsModal',true);
+      this.set('isExportResultSuccessMessege',false);
+      this.set('isExportResultFailureMessege',false);
     },
 
     closeSaveHdfsModal(){
-      this.set('showSaveHdfsModal',false)
+      this.set('showSaveHdfsModal',false);
+      this.set('isExportResultSuccessMessege',false);
+      this.set('isExportResultFailureMessege',false);
+    },
+
+    openDownloadCsvModal(){
+      this.set('showDownloadCsvModal',true);
+      this.set('isExportResultSuccessMessege',false);
+      this.set('isExportResultFailureMessege',false);
+    },
+
+    closeDownloadCsvModal(){
+      this.set('showDownloadCsvModal',false);
+      this.set('isExportResultSuccessMessege',false);
+      this.set('isExportResultFailureMessege',false);
     },
 
     saveToHDFS(jobId, pathName){
       console.log('saveToHDFS with jobId == ', jobId );
       console.log('saveToHDFS with pathName == ', pathName );
-
       this.sendAction('saveToHDFS', jobId,  pathName);
+    },
 
-
+    downloadAsCsv(jobId, pathName){
+      console.log('downloadAsCsv with jobId == ', jobId );
+      console.log('downloadAsCsv with pathName == ', pathName );
+      this.sendAction('downloadAsCsv', jobId,  pathName);
     }
 
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b22dd01/contrib/views/hive20/src/main/resources/ui/app/components/save-hdfs.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/components/save-hdfs.js b/contrib/views/hive20/src/main/resources/ui/app/components/save-hdfs.js
deleted file mode 100644
index 05b3b33..0000000
--- a/contrib/views/hive20/src/main/resources/ui/app/components/save-hdfs.js
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import Ember from 'ember';
-
-export default Ember.Component.extend({
-
-  label: null,
-  confirmText: 'Confirm',
-  rejectText: 'Reject',
-
-  jobId: 0,
-
-  savePathName: Ember.computed('jobId', function() {
-    return 'Worksheet_' + this.get('jobId');
-  }),
-
-  labelIcon: null,
-  confirmIcon: null,
-  rejectIcon: null,
-
-  closable: true,
-  titleClass: 'primary',
-  confirmClass: 'primary',
-  rejectClass: 'default',
-
-  actions: {
-    confirm() {
-      let pathName = $("input.path-name").val();
-      this.sendAction('confirm', this.get('jobId'), pathName);
-    },
-
-    reject() {
-      this.sendAction('reject');
-    }
-  }
-});

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b22dd01/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js b/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js
index d4c82fa..606b71a 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/routes/queries/query.js
@@ -81,6 +81,10 @@ export default Ember.Route.extend({
     controller.set('queryResult', model.get('queryResult'));
     controller.set('currentJobId', null);
 
+    controller.set('isExportResultSuccessMessege', false);
+    controller.set('isExportResultFailureMessege', false);
+    controller.set('showSaveHdfsModal', false);
+
   },
 
 
@@ -348,13 +352,35 @@ export default Ember.Route.extend({
 
       this.get('query').saveToHDFS(jobId, path)
          .then((data) => {
-            Ember.run.later(() => {
-              console.log('successfully saveToHDFS', data);
-            }, 2 * 1000);
+           console.log('successfully saveToHDFS', data);
+           this.get('controller').set('isExportResultSuccessMessege', true);
+           this.get('controller').set('isExportResultFailureMessege', false);
+
+           Ember.run.later(() => {
+             this.get('controller').set('showSaveHdfsModal', false);
+           }, 2 * 1000);
 
           }, (error) => {
             console.log("Error encountered", error);
+            this.get('controller').set('isExportResultFailureMessege', true);
+            this.get('controller').set('isExportResultSuccessMessege', false);
+
+            Ember.run.later(() => {
+               this.get('controller').set('showSaveHdfsModal', false);
+             }, 2 * 1000);
+
           });
+    },
+
+    downloadAsCsv(jobId, path){
+
+      console.log('downloadAsCsv query route with jobId == ', jobId);
+      console.log('downloadAsCsv query route with path == ', path);
+
+      let downloadAsCsvUrl = this.get('query').downloadAsCsv(jobId, path) || '';
+
+      this.get('controller').set('showDownloadCsvModal', false);
+      window.open(downloadAsCsvUrl);
 
     }
   }
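
The success and failure branches of saveToHDFS above differ only in which
flag they raise before closing the modal two seconds later. A possible
consolidation, sketched here but not part of the commit (the helper name
setExportResult is invented), keeps both promise callbacks to one line:

// Inside the same Ember.Route, callable from both promise callbacks.
setExportResult(success) {
  let controller = this.get('controller');
  controller.set('isExportResultSuccessMessege', success);
  controller.set('isExportResultFailureMessege', !success);
  Ember.run.later(() => {
    controller.set('showSaveHdfsModal', false);
  }, 2 * 1000);
}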

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b22dd01/contrib/views/hive20/src/main/resources/ui/app/services/query.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/services/query.js b/contrib/views/hive20/src/main/resources/ui/app/services/query.js
index d95a2e5..400f78b 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/services/query.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/services/query.js
@@ -51,7 +51,15 @@ export default Ember.Service.extend({
           reject(err);
       });
     });
+  },
+
+
+  downloadAsCsv(jobId, path){
+    let self = this;
+    return this.get('store').adapterFor('job').downloadAsCsv(jobId, path);
+
   }
 
 
+
 });
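
The service above only forwards to the job adapter's downloadAsCsv, which is
not part of this diff. Since the query route treats the return value as a
string and hands it to window.open, the simplest contract is a plain GET URL
that streams the result set as CSV. A minimal sketch under that assumption
(the 'results/csv' path segment and the fileName parameter are guesses, not
the verified Hive View API):

import DS from 'ember-data';

export default DS.RESTAdapter.extend({
  downloadAsCsv(jobId, fileName) {
    // buildURL yields '<host>/<namespace>/jobs/<id>'; the CSV endpoint and
    // file name parameter are appended as an assumed convention.
    return this.buildURL('job', jobId) +
      '/results/csv?fileName=' + encodeURIComponent(fileName);
  }
});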

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b22dd01/contrib/views/hive20/src/main/resources/ui/app/templates/components/export-result.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/components/export-result.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/components/export-result.hbs
new file mode 100644
index 0000000..0b462cb
--- /dev/null
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/components/export-result.hbs
@@ -0,0 +1,46 @@
+{{!
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+}}
+
+{{#modal-dialog
+  close="reject"
+  translucentOverlay=true
+  clickOutsideToClose=true
+  container-class="modal-dialog"}}
+  <div class="modal-content">
+    <div class="modal-header text-{{titleClass}}">
+        <button type="button" class="close" {{action "reject"}}><span aria-hidden="true">&times;</span></button>
+    </div>
+    <div class="modal-body">
+      <p class="lead">{{#if labelIcon}}{{fa-icon labelIcon size="lg"}}{{/if}} {{label}}</p>
+      {{input class="form-control path-name" type="text" value=pathName}}
+      <div>
+        {{#if isExportResultSuccessMessege}}
+          <span class="text-success">Successfully done.</span>
+        {{/if}}
+        {{#if isExportResultFailureMessege}}
+          <span class="text-danger">Error encountered.</span>
+        {{/if}}
+        &nbsp;
+      </div>
+    </div>
+    <div class="modal-footer">
+      <button type="button" class="btn btn-{{rejectClass}}" {{action "reject"}}>{{#if rejectIcon}}{{fa-icon rejectIcon}} {{/if}}{{rejectText}}</button>
+      <button type="button" class="btn btn-{{confirmClass}}" {{action "confirm"}}>{{#if confirmIcon}}{{fa-icon confirmIcon}} {{/if}}{{confirmText}}</button>
+    </div>
+  </div>
+{{/modal-dialog}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b22dd01/contrib/views/hive20/src/main/resources/ui/app/templates/components/query-result-table.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/components/query-result-table.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/components/query-result-table.hbs
index 4bda978..4fe6f43 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/components/query-result-table.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/components/query-result-table.hbs
@@ -59,7 +59,7 @@
 </div>
 
 {{#if showSaveHdfsModal}}
-  {{save-hdfs
+  {{export-result
   confirmText="OK"
   rejectText="CLOSE"
   label="Please enter save path and name."
@@ -71,11 +71,27 @@
   confirmClass="success"
   confirm="saveToHDFS"
   reject="closeSaveHdfsModal"
+  isExportResultSuccessMessege=isExportResultSuccessMessege
+  isExportResultFailureMessege=isExportResultFailureMessege
   }}
 {{/if}}
 
 {{#if showDownloadCsvModal}}
-Yahoo
+  {{export-result
+  confirmText="OK"
+  rejectText="CLOSE"
+  label="Download results as CSV"
+  jobId=jobId
+  labelIcon="download"
+  rejectIcon="times"
+  confirmIcon="check"
+  closable=false
+  confirmClass="success"
+  confirm="downloadAsCsv"
+  reject="closeDownloadCsvModal"
+  isExportResultSuccessMessege=isExportResultSuccessMessege
+  isExportResultFailureMessege=isExportResultFailureMessege
+  }}
 {{/if}}
 
 {{yield}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b22dd01/contrib/views/hive20/src/main/resources/ui/app/templates/components/save-hdfs.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/components/save-hdfs.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/components/save-hdfs.hbs
deleted file mode 100644
index 5b53102..0000000
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/components/save-hdfs.hbs
+++ /dev/null
@@ -1,37 +0,0 @@
-{{!
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-}}
-
-{{#modal-dialog
-  close="reject"
-  translucentOverlay=true
-  clickOutsideToClose=true
-  container-class="modal-dialog"}}
-  <div class="modal-content">
-    <div class="modal-header text-{{titleClass}}">
-        <button type="button" class="close" {{action "reject"}}><span aria-hidden="true">&times;</span></button>
-    </div>
-    <div class="modal-body">
-      <p class="lead">{{#if labelIcon}}{{fa-icon labelIcon size="lg"}}{{/if}} {{label}}</p>
-      {{input class="form-control path-name" type="text" value=savePathName}}
-    </div>
-    <div class="modal-footer">
-      <button type="button" class="btn btn-{{rejectClass}}" {{action "reject"}}>{{#if rejectIcon}}{{fa-icon rejectIcon}} {{/if}}{{rejectText}}</button>
-      <button type="button" class="btn btn-{{confirmClass}}" {{action "confirm"}}>{{#if confirmIcon}}{{fa-icon confirmIcon}} {{/if}}{{confirmText}}</button>
-    </div>
-  </div>
-{{/modal-dialog}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b22dd01/contrib/views/hive20/src/main/resources/ui/app/templates/queries/query.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/templates/queries/query.hbs b/contrib/views/hive20/src/main/resources/ui/app/templates/queries/query.hbs
index 120a045..2138356 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/templates/queries/query.hbs
+++ b/contrib/views/hive20/src/main/resources/ui/app/templates/queries/query.hbs
@@ -46,6 +46,10 @@
       goPrevPage='goPrevPage'
       expandQueryResultPanel='expandQueryResultPanel'
       saveToHDFS='saveToHDFS'
+      downloadAsCsv='downloadAsCsv'
+      isExportResultSuccessMessege=isExportResultSuccessMessege
+      isExportResultFailureMessege=isExportResultFailureMessege
+      showSaveHdfsModal=showSaveHdfsModal
       }}
     </div>
   </div>
@@ -91,7 +95,6 @@
 </div>
 
 {{#if showWorksheetModal}}
-
   {{#modal-dialog translucentOverlay=true clickOutsideToClose=true container-class="modal-dialog  modal-sm"}}
     <div class="modal-content">
       <div class="modal-header">
@@ -105,15 +108,12 @@
               {{input type="text" class="form-control" id="worksheet-title" placeholder="Title" value=worksheet.title }}
             </div>
           </div>
-
           {{#if worksheetModalSuccess }}
             <div class="text-success">Successfully Saved.</div>
           {{/if}}
           {{#if worksheetModalFail }}
             <div class="text-danger">Error</div>
           {{/if}}
-
-
         </div>
       </div>
 
@@ -123,12 +123,6 @@
       </div>
     </div>
   {{/modal-dialog}}
-
-
 {{/if}}
 
-
-
-
-
 {{outlet}}


[26/50] [abbrv] ambari git commit: AMBARI-19337. Ambari has some spelling mistakes in YARN proxyuser properties in many places (Jay SenSharma via smohanty)

Posted by nc...@apache.org.
AMBARI-19337. Ambari has some spelling mistakes in YARN proxyuser properties in many places (Jay SenSharma via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c689096d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c689096d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c689096d

Branch: refs/heads/branch-dev-patch-upgrade
Commit: c689096d13c62beafe1eb52a8aa9a4a8c4d9cd63
Parents: 37baf9a
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Tue Jan 17 12:18:06 2017 -0800
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Tue Jan 17 12:18:06 2017 -0800

----------------------------------------------------------------------
 .../YARN/2.1.0.2.0/kerberos.json                |   12 +-
 .../YARN/2.1.0.2.0/kerberos.json.orig           |  216 ++
 .../YARN/3.0.0.3.0/kerberos.json                |   12 +-
 .../YARN/3.0.0.3.0/kerberos.json.orig           |  280 ++
 .../stacks/HDP/2.2/services/YARN/kerberos.json  |   12 +-
 .../HDP/2.2/services/YARN/kerberos.json.orig    |  217 ++
 .../HDP/2.3.ECS/services/YARN/kerberos.json     |   12 +-
 .../2.3.ECS/services/YARN/kerberos.json.orig    |  220 ++
 .../stacks/HDP/2.3/services/YARN/kerberos.json  |   12 +-
 .../HDP/2.3/services/YARN/kerberos.json.orig    |  226 ++
 .../stacks/HDP/2.5/services/YARN/kerberos.json  |   12 +-
 .../HDP/2.5/services/YARN/kerberos.json.orig    |  280 ++
 .../stacks/PERF/1.0/services/YARN/kerberos.json |   12 +-
 .../PERF/1.0/services/YARN/kerberos.json.orig   |  278 ++
 .../2.2/configs/pig-service-check-secure.json   |   12 +-
 .../configs/pig-service-check-secure.json.orig  |  651 ++++
 .../test_kerberos_descriptor_2_1_3.json         |   12 +-
 .../test_kerberos_descriptor_2_1_3.json.orig    | 1320 ++++++++
 .../data/stacks/HDP-2.1/service_components.json |   12 +-
 .../stacks/HDP-2.1/service_components.json.orig | 3170 ++++++++++++++++++
 .../app/data/configs/wizards/secure_mapping.js  |   12 +-
 21 files changed, 6924 insertions(+), 66 deletions(-)
----------------------------------------------------------------------
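
The rename applied across the files below is mechanical: every wildcard key
spelled proxyusers.* becomes proxyuser.*. A minimal Node.js sketch of the
same edit (illustration only, not tooling from this commit); working on the
raw text preserves each file's key order and formatting:

const fs = require('fs');

function fixProxyuserKeys(file) {
  const text = fs.readFileSync(file, 'utf8');
  // Only the wildcard entries were misspelled; correctly spelled
  // hadoop.proxyuser.* keys contain no 'proxyusers.' segment and are
  // left untouched by this replacement.
  const fixed = text.replace(/\.proxyusers\.\*/g, '.proxyuser.*');
  if (fixed !== text) {
    fs.writeFileSync(file, fixed);
  }
}

process.argv.slice(2).forEach(fixProxyuserKeys);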


http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
index 6b61c13..c8b5989 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json
@@ -23,13 +23,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxyuser.*.groups": "",
+            "yarn.resourcemanager.proxyuser.*.hosts": "",
+            "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",

http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json.orig
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json.orig b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json.orig
new file mode 100644
index 0000000..6b61c13
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/kerberos.json.orig
@@ -0,0 +1,216 @@
+{
+  "services": [
+    {
+      "name": "YARN",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "yarn-site": {
+            "yarn.timeline-service.enabled": "false",
+            "yarn.timeline-service.http-authentication.type": "kerberos",
+            "yarn.acl.enable": "true",
+            "yarn.timeline-service.http-authentication.signature.secret": "",
+            "yarn.timeline-service.http-authentication.signature.secret.file": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
+            "yarn.timeline-service.http-authentication.token.validity": "",
+            "yarn.timeline-service.http-authentication.cookie.domain": "",
+            "yarn.timeline-service.http-authentication.cookie.path": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
+            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
+            "hadoop.registry.secure" : "true",
+            "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
+            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "NODEMANAGER",
+          "identities": [
+            {
+              "name": "nodemanager_nm",
+              "principal": {
+                "value": "nm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.nodemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.nodemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "yarn-site": {
+                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
+              }
+            }
+          ]
+        },
+        {
+          "name": "RESOURCEMANAGER",
+          "identities": [
+            {
+              "name": "resource_manager_rm",
+              "principal": {
+                "value": "rm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.resourcemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/rm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.resourcemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "APP_TIMELINE_SERVER",
+          "identities": [
+            {
+              "name": "app_timeline_server_yarn",
+              "principal": {
+                "value": "yarn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.timeline-service.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/yarn.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.timeline-service.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
+              }
+            },
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "name": "MAPREDUCE2",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "HISTORYSERVER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "history_server_jhs",
+              "principal": {
+                "value": "jhs/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "mapred-site/mapreduce.jobhistory.principal",
+                "local_username": "${mapred-env/mapred_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jhs.service.keytab",
+                "owner": {
+                  "name": "${mapred-env/mapred_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
index d334887..fb85e7a 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json
@@ -24,13 +24,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxyuser.*.groups": "",
+            "yarn.resourcemanager.proxyuser.*.hosts": "",
+            "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",

http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json.orig
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json.orig b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json.orig
new file mode 100644
index 0000000..d334887
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/kerberos.json.orig
@@ -0,0 +1,280 @@
+{
+  "services": [
+    {
+      "name": "YARN",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "yarn-site": {
+            "yarn.timeline-service.enabled": "true",
+            "yarn.timeline-service.http-authentication.type": "kerberos",
+            "yarn.acl.enable": "true",
+            "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
+            "yarn.timeline-service.http-authentication.signature.secret": "",
+            "yarn.timeline-service.http-authentication.signature.secret.file": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
+            "yarn.timeline-service.http-authentication.token.validity": "",
+            "yarn.timeline-service.http-authentication.cookie.domain": "",
+            "yarn.timeline-service.http-authentication.cookie.path": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
+            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
+            "hadoop.registry.secure" : "true",
+            "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
+            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
+          }
+        },
+        {
+          "capacity-scheduler": {
+            "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
+          }
+        },
+        {
+          "ranger-yarn-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "NODEMANAGER",
+          "identities": [
+            {
+              "name": "nodemanager_nm",
+              "principal": {
+                "value": "nm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.nodemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.nodemanager.keytab"
+              }
+            },
+            {
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.service.principal"
+              },
+              "keytab": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.keytab.file"
+              },
+              "when" : {
+                "contains" : ["services", "HIVE"]
+              }
+            },
+            {
+              "name": "llap_zk_hive",
+              "principal": {
+                "value": "hive/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.principal"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hive.llap.zk.sm.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": "r"
+                },
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
+              },
+              "when" : {
+                "contains" : ["services", "HIVE"]
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "yarn-site": {
+                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
+              }
+            }
+          ]
+        },
+        {
+          "name": "RESOURCEMANAGER",
+          "identities": [
+            {
+              "name": "resource_manager_rm",
+              "principal": {
+                "value": "rm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.resourcemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/rm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.resourcemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
+              }
+            },
+            {
+              "name": "/YARN/RESOURCEMANAGER/resource_manager_rm",
+              "principal": {
+                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "APP_TIMELINE_SERVER",
+          "identities": [
+            {
+              "name": "app_timeline_server_yarn",
+              "principal": {
+                "value": "yarn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.timeline-service.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/yarn.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.timeline-service.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
+              }
+            },
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "name": "MAPREDUCE2",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "HISTORYSERVER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "history_server_jhs",
+              "principal": {
+                "value": "jhs/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "mapred-site/mapreduce.jobhistory.principal",
+                "local_username": "${mapred-env/mapred_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jhs.service.keytab",
+                "owner": {
+                  "name": "${mapred-env/mapred_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
index ad30b76..85a3221 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json
@@ -23,13 +23,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxyuser.*.groups": "",
+            "yarn.resourcemanager.proxyuser.*.hosts": "",
+            "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore-secure",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",

http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json.orig
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json.orig b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json.orig
new file mode 100644
index 0000000..ad30b76
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/kerberos.json.orig
@@ -0,0 +1,217 @@
+{
+  "services": [
+    {
+      "name": "YARN",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "yarn-site": {
+            "yarn.timeline-service.enabled": "true",
+            "yarn.timeline-service.http-authentication.type": "kerberos",
+            "yarn.acl.enable": "true",
+            "yarn.timeline-service.http-authentication.signature.secret": "",
+            "yarn.timeline-service.http-authentication.signature.secret.file": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
+            "yarn.timeline-service.http-authentication.token.validity": "",
+            "yarn.timeline-service.http-authentication.cookie.domain": "",
+            "yarn.timeline-service.http-authentication.cookie.path": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
+            "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore-secure",
+            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
+            "hadoop.registry.secure" : "true",
+            "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
+            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "NODEMANAGER",
+          "identities": [
+            {
+              "name": "nodemanager_nm",
+              "principal": {
+                "value": "nm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.nodemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.nodemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "yarn-site": {
+                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
+              }
+            }
+          ]
+        },
+        {
+          "name": "RESOURCEMANAGER",
+          "identities": [
+            {
+              "name": "resource_manager_rm",
+              "principal": {
+                "value": "rm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.resourcemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/rm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.resourcemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "APP_TIMELINE_SERVER",
+          "identities": [
+            {
+              "name": "app_timeline_server_yarn",
+              "principal": {
+                "value": "yarn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.timeline-service.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/yarn.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.timeline-service.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
+              }
+            },
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "name": "MAPREDUCE2",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "HISTORYSERVER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "history_server_jhs",
+              "principal": {
+                "value": "jhs/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "mapred-site/mapreduce.jobhistory.principal",
+                "local_username": "${mapred-env/mapred_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jhs.service.keytab",
+                "owner": {
+                  "name": "${mapred-env/mapred_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
index 7977941..e27513a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
@@ -26,13 +26,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxyuser.*.groups": "",
+            "yarn.resourcemanager.proxyuser.*.hosts": "",
+            "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",

http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json.orig
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json.orig b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json.orig
new file mode 100644
index 0000000..7977941
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json.orig
@@ -0,0 +1,220 @@
+{
+  "services": [
+    {
+      "name": "YARN",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/ECS/hdfs"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "yarn-site": {
+            "yarn.timeline-service.enabled": "false",
+            "yarn.timeline-service.http-authentication.type": "kerberos",
+            "yarn.acl.enable": "true",
+            "yarn.timeline-service.http-authentication.signature.secret": "",
+            "yarn.timeline-service.http-authentication.signature.secret.file": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
+            "yarn.timeline-service.http-authentication.token.validity": "",
+            "yarn.timeline-service.http-authentication.cookie.domain": "",
+            "yarn.timeline-service.http-authentication.cookie.path": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
+            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
+            "hadoop.registry.secure" : "true",
+            "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.yarn.groups": "*",
+            "hadoop.proxyuser.yarn.hosts": "${yarn-site/yarn.resourcemanager.hostname}"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "NODEMANAGER",
+          "identities": [
+            {
+              "name": "nodemanager_nm",
+              "principal": {
+                "value": "nm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.nodemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.nodemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "yarn-site": {
+                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
+              }
+            }
+          ]
+        },
+        {
+          "name": "RESOURCEMANAGER",
+          "identities": [
+            {
+              "name": "resource_manager_rm",
+              "principal": {
+                "value": "rm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.resourcemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/rm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.resourcemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "APP_TIMELINE_SERVER",
+          "identities": [
+            {
+              "name": "app_timeline_server_yarn",
+              "principal": {
+                "value": "yarn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.timeline-service.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/yarn.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.timeline-service.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
+              }
+            },
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "name": "MAPREDUCE2",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/ECS/hdfs"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "HISTORYSERVER",
+          "identities": [
+            {
+              "name": "history_server_jhs",
+              "principal": {
+                "value": "jhs/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "mapred-site/mapreduce.jobhistory.principal",
+                "local_username": "${mapred-env/mapred_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jhs.service.keytab",
+                "owner": {
+                  "name": "${mapred-env/mapred_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
+
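
The new descriptor above follows Ambari's usual Kerberos descriptor layout: the service carries shared identities and configuration overrides, and each component lists the principals and keytabs it needs, with "configuration" naming the property that receives the resolved value. A minimal sketch of walking such a descriptor to list principal-to-property mappings (a hypothetical standalone Node script, not part of this commit) could look like:

    // list-principals.js -- hypothetical helper; assumes only the
    // descriptor shape visible in the diff above
    // (services -> components -> identities -> principal).
    const fs = require('fs');

    const descriptor = JSON.parse(fs.readFileSync(process.argv[2], 'utf8'));
    (descriptor.services || []).forEach(function (service) {
      (service.components || []).forEach(function (component) {
        (component.identities || []).forEach(function (identity) {
          const principal = identity.principal || {};
          if (principal.value) {
            // e.g. "YARN/NODEMANAGER: nm/_HOST@${realm} ->
            //       yarn-site/yarn.nodemanager.principal"
            console.log(service.name + '/' + component.name + ': ' +
                        principal.value + ' -> ' + principal.configuration);
          }
        });
      });
    });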

http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
index 73addb1..bf0280b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json
@@ -24,13 +24,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxyuser.*.groups": "",
+            "yarn.resourcemanager.proxyuser.*.hosts": "",
+            "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",

http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json.orig
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json.orig b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json.orig
new file mode 100644
index 0000000..73addb1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/YARN/kerberos.json.orig
@@ -0,0 +1,226 @@
+{
+  "services": [
+    {
+      "name": "YARN",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "yarn-site": {
+            "yarn.timeline-service.enabled": "true",
+            "yarn.timeline-service.http-authentication.type": "kerberos",
+            "yarn.acl.enable": "true",
+            "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
+            "yarn.timeline-service.http-authentication.signature.secret": "",
+            "yarn.timeline-service.http-authentication.signature.secret.file": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
+            "yarn.timeline-service.http-authentication.token.validity": "",
+            "yarn.timeline-service.http-authentication.cookie.domain": "",
+            "yarn.timeline-service.http-authentication.cookie.path": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
+            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
+            "hadoop.registry.secure" : "true",
+            "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
+            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
+          }
+        },
+        {
+          "capacity-scheduler": {
+            "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "NODEMANAGER",
+          "identities": [
+            {
+              "name": "nodemanager_nm",
+              "principal": {
+                "value": "nm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.nodemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.nodemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "yarn-site": {
+                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
+              }
+            }
+          ]
+        },
+        {
+          "name": "RESOURCEMANAGER",
+          "identities": [
+            {
+              "name": "resource_manager_rm",
+              "principal": {
+                "value": "rm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.resourcemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/rm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.resourcemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "APP_TIMELINE_SERVER",
+          "identities": [
+            {
+              "name": "app_timeline_server_yarn",
+              "principal": {
+                "value": "yarn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.timeline-service.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/yarn.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.timeline-service.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
+              }
+            },
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "name": "MAPREDUCE2",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "HISTORYSERVER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "history_server_jhs",
+              "principal": {
+                "value": "jhs/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "mapred-site/mapreduce.jobhistory.principal",
+                "local_username": "${mapred-env/mapred_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jhs.service.keytab",
+                "owner": {
+                  "name": "${mapred-env/mapred_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
index d334887..fb85e7a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
@@ -24,13 +24,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxyuser.*.groups": "",
+            "yarn.resourcemanager.proxyuser.*.hosts": "",
+            "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
             "hadoop.registry.secure" : "true",

http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json.orig
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json.orig b/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json.orig
new file mode 100644
index 0000000..d334887
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json.orig
@@ -0,0 +1,280 @@
+{
+  "services": [
+    {
+      "name": "YARN",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "yarn-site": {
+            "yarn.timeline-service.enabled": "true",
+            "yarn.timeline-service.http-authentication.type": "kerberos",
+            "yarn.acl.enable": "true",
+            "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
+            "yarn.timeline-service.http-authentication.signature.secret": "",
+            "yarn.timeline-service.http-authentication.signature.secret.file": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
+            "yarn.timeline-service.http-authentication.token.validity": "",
+            "yarn.timeline-service.http-authentication.cookie.domain": "",
+            "yarn.timeline-service.http-authentication.cookie.path": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
+            "yarn.resourcemanager.zk-acl" : "sasl:rm:rwcda",
+            "hadoop.registry.secure" : "true",
+            "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
+            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
+          }
+        },
+        {
+          "capacity-scheduler": {
+            "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
+          }
+        },
+        {
+          "ranger-yarn-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "NODEMANAGER",
+          "identities": [
+            {
+              "name": "nodemanager_nm",
+              "principal": {
+                "value": "nm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.nodemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.nodemanager.keytab"
+              }
+            },
+            {
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.service.principal"
+              },
+              "keytab": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.keytab.file"
+              },
+              "when" : {
+                "contains" : ["services", "HIVE"]
+              }
+            },
+            {
+              "name": "llap_zk_hive",
+              "principal": {
+                "value": "hive/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.principal"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hive.llap.zk.sm.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": "r"
+                },
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
+              },
+              "when" : {
+                "contains" : ["services", "HIVE"]
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "yarn-site": {
+                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
+              }
+            }
+          ]
+        },
+        {
+          "name": "RESOURCEMANAGER",
+          "identities": [
+            {
+              "name": "resource_manager_rm",
+              "principal": {
+                "value": "rm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.resourcemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/rm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.resourcemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
+              }
+            },
+            {
+              "name": "/YARN/RESOURCEMANAGER/resource_manager_rm",
+              "principal": {
+                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "APP_TIMELINE_SERVER",
+          "identities": [
+            {
+              "name": "app_timeline_server_yarn",
+              "principal": {
+                "value": "yarn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.timeline-service.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/yarn.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.timeline-service.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
+              }
+            },
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "name": "MAPREDUCE2",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "HISTORYSERVER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "history_server_jhs",
+              "principal": {
+                "value": "jhs/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "mapred-site/mapreduce.jobhistory.principal",
+                "local_username": "${mapred-env/mapred_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jhs.service.keytab",
+                "owner": {
+                  "name": "${mapred-env/mapred_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json
index 7e74237..2735323 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json
@@ -24,13 +24,13 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxyuser.*.groups": "",
+            "yarn.resourcemanager.proxyuser.*.hosts": "",
+            "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
             "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore-secure"
           }

http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json.orig
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json.orig b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json.orig
new file mode 100644
index 0000000..7e74237
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/services/YARN/kerberos.json.orig
@@ -0,0 +1,278 @@
+{
+  "services": [
+    {
+      "name": "YARN",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "yarn-site": {
+            "yarn.timeline-service.enabled": "true",
+            "yarn.timeline-service.http-authentication.type": "kerberos",
+            "yarn.acl.enable": "true",
+            "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
+            "yarn.timeline-service.http-authentication.signature.secret": "",
+            "yarn.timeline-service.http-authentication.signature.secret.file": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
+            "yarn.timeline-service.http-authentication.token.validity": "",
+            "yarn.timeline-service.http-authentication.cookie.domain": "",
+            "yarn.timeline-service.http-authentication.cookie.path": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
+            "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore-secure"
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
+            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
+          }
+        },
+        {
+          "capacity-scheduler": {
+            "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
+          }
+        },
+        {
+          "ranger-yarn-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "NODEMANAGER",
+          "identities": [
+            {
+              "name": "nodemanager_nm",
+              "principal": {
+                "value": "nm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.nodemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.nodemanager.keytab"
+              }
+            },
+            {
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.service.principal"
+              },
+              "keytab": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.keytab.file"
+              },
+              "when" : {
+                "contains" : ["services", "HIVE"]
+              }
+            },
+            {
+              "name": "llap_zk_hive",
+              "principal": {
+                "value": "hive/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.principal"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hive.llap.zk.sm.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": "r"
+                },
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
+              },
+              "when" : {
+                "contains" : ["services", "HIVE"]
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "yarn-site": {
+                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
+              }
+            }
+          ]
+        },
+        {
+          "name": "RESOURCEMANAGER",
+          "identities": [
+            {
+              "name": "resource_manager_rm",
+              "principal": {
+                "value": "rm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.resourcemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/rm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.resourcemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
+              }
+            },
+            {
+              "name": "/YARN/RESOURCEMANAGER/resource_manager_rm",
+              "principal": {
+                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "APP_TIMELINE_SERVER",
+          "identities": [
+            {
+              "name": "app_timeline_server_yarn",
+              "principal": {
+                "value": "yarn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.timeline-service.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/yarn.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.timeline-service.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
+              }
+            },
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "name": "MAPREDUCE2",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "HISTORYSERVER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "history_server_jhs",
+              "principal": {
+                "value": "jhs/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "mapred-site/mapreduce.jobhistory.principal",
+                "local_username": "${mapred-env/mapred_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jhs.service.keytab",
+                "owner": {
+                  "name": "${mapred-env/mapred_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json b/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
index f14eb52..0ac9e78 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json
@@ -402,7 +402,7 @@
             "yarn.log-aggregation-enable": "true", 
             "yarn.nodemanager.delete.debug-delay-sec": "0", 
             "yarn.timeline-service.store-class": "org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore", 
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "", 
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
             "yarn.timeline-service.client.retry-interval-ms": "1000", 
             "hadoop.registry.zk.quorum": "c6402.ambari.apache.org:2181,c6403.ambari.apache.org:2181,c6401.ambari.apache.org:2181", 
             "yarn.nodemanager.aux-services": "mapreduce_shuffle", 
@@ -424,7 +424,7 @@
             "yarn.nodemanager.resource.memory-mb": "2048", 
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "", 
             "yarn.nodemanager.resource.cpu-vcores": "1", 
-            "yarn.resourcemanager.proxyusers.*.users": "", 
+            "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.timeline-service.ttl-ms": "2678400000", 
             "yarn.nodemanager.resource.percentage-physical-cpu-limit": "100", 
             "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000", 
@@ -433,7 +433,7 @@
             "yarn.nodemanager.log.retain-seconds": "604800",
             "yarn.timeline-service.http-authentication.type": "kerberos", 
             "yarn.nodemanager.log-dirs": "/hadoop/yarn/log", 
-            "yarn.resourcemanager.proxyusers.*.groups": "", 
+            "yarn.resourcemanager.proxyuser.*.groups": "",
             "yarn.timeline-service.client.max-retries": "30", 
             "yarn.nodemanager.health-checker.interval-ms": "135000", 
             "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX", 
@@ -445,7 +445,7 @@
             "yarn.client.nodemanager-connect.max-wait-ms": "60000", 
             "yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true", 
             "yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000", 
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "", 
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
             "yarn.timeline-service.http-authentication.signer.secret.provider": "", 
             "yarn.resourcemanager.bind-host": "0.0.0.0", 
             "yarn.http.policy": "HTTP_ONLY", 
@@ -463,7 +463,7 @@
             "hadoop.registry.rm.enabled": "false", 
             "yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms": "300000", 
             "yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500", 
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "", 
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
             "yarn.nodemanager.log-aggregation.compression-type": "gz", 
             "yarn.timeline-service.http-authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
             "yarn.nodemanager.log-aggregation.num-log-files-per-app": "30", 
@@ -478,7 +478,7 @@
             "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore", 
             "yarn.resourcemanager.connect.retry-interval.ms": "30000", 
             "yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000", 
-            "yarn.resourcemanager.proxyusers.*.hosts": ""
+            "yarn.resourcemanager.proxyuser.*.hosts": ""
         }, 
         "capacity-scheduler": {
             "yarn.scheduler.capacity.default.minimum-user-limit-percent": "100", 


[27/50] [abbrv] ambari git commit: Revert "AMBARI-19337. Ambari has some spelling mistakes in YARN proxyuser properties in many places (Jay SenSharma via smohanty)"

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c916dda5/ambari-web/app/data/configs/wizards/secure_mapping.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/configs/wizards/secure_mapping.js b/ambari-web/app/data/configs/wizards/secure_mapping.js
index 2d24628..8d952e8 100644
--- a/ambari-web/app/data/configs/wizards/secure_mapping.js
+++ b/ambari-web/app/data/configs/wizards/secure_mapping.js
@@ -854,7 +854,7 @@ var yarn22Mapping = [
     "filename": "yarn-site.xml"
   },
   {
-    "name": 'yarn.timeline-service.http-authentication.proxyuser.*.hosts',
+    "name": 'yarn.timeline-service.http-authentication.proxyusers.*.hosts',
     "value": "",
     "templateName": [],
     "foreignKey": null,
@@ -862,7 +862,7 @@ var yarn22Mapping = [
     "filename": "yarn-site.xml"
   },
   {
-    "name": 'yarn.timeline-service.http-authentication.proxyuser.*.users',
+    "name": 'yarn.timeline-service.http-authentication.proxyusers.*.users',
     "value": "",
     "serviceName": "YARN",
     "templateName": [],
@@ -870,7 +870,7 @@ var yarn22Mapping = [
     "filename": "yarn-site.xml"
   },
   {
-    "name": 'yarn.timeline-service.http-authentication.proxyuser.*.groups',
+    "name": 'yarn.timeline-service.http-authentication.proxyusers.*.groups',
     "value": "",
     "templateName": [],
     "foreignKey": null,
@@ -958,7 +958,7 @@ var yarn22Mapping = [
     "filename": "yarn-site.xml"
   },
   {
-    "name": 'yarn.resourcemanager.proxyuser.*.hosts',
+    "name": 'yarn.resourcemanager.proxyusers.*.hosts',
     "value": "",
     "templateName": [],
     "foreignKey": null,
@@ -966,7 +966,7 @@ var yarn22Mapping = [
     "filename": "yarn-site.xml"
   },
   {
-    "name": 'yarn.resourcemanager.proxyuser.*.users',
+    "name": 'yarn.resourcemanager.proxyusers.*.users',
     "value": "",
     "templateName": [],
     "foreignKey": null,
@@ -974,7 +974,7 @@ var yarn22Mapping = [
     "filename": "yarn-site.xml"
   },
   {
-    "name": 'yarn.resourcemanager.proxyuser.*.groups',
+    "name": 'yarn.resourcemanager.proxyusers.*.groups',
     "value": "",
     "templateName": [],
     "foreignKey": null,


[49/50] [abbrv] ambari git commit: AMBARI-19611. Add UI UT for alerts (onechiporenko)

Posted by nc...@apache.org.
AMBARI-19611. Add UI UT for alerts (onechiporenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d7f1e8c0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d7f1e8c0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d7f1e8c0

Branch: refs/heads/branch-dev-patch-upgrade
Commit: d7f1e8c00e574e5612f40e286b9795bd1e531259
Parents: 61d0f64
Author: Oleg Nechiporenko <on...@apache.org>
Authored: Wed Jan 18 13:58:43 2017 +0200
Committer: Oleg Nechiporenko <on...@apache.org>
Committed: Wed Jan 18 15:35:20 2017 +0200

----------------------------------------------------------------------
 ambari-web/app/assets/test/tests.js             |   1 +
 .../alerts/definition_details_controller.js     |  24 ++-
 .../alerts/manage_alert_groups_controller.js    | 194 ++++++++-----------
 .../manage_alert_notifications_controller.js    | 106 +++++-----
 .../app/mappers/alert_definitions_mapper.js     |  16 +-
 ambari-web/app/models/alerts/alert_config.js    |   4 +-
 ambari-web/app/models/alerts/alert_group.js     |   2 +-
 .../app/models/alerts/alert_notification.js     |   4 +-
 .../manage_alert_groups_controller_test.js      |  57 ++++++
 ...anage_alert_notifications_controller_test.js |  10 +
 .../test/models/alerts/alert_config_test.js     |  21 ++
 .../test/models/alerts/alert_group_test.js      |  25 +++
 .../models/alerts/alert_notification_test.js    |  50 +++++
 13 files changed, 313 insertions(+), 201 deletions(-)
----------------------------------------------------------------------
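
The new spec files follow ambari-web's existing mocha conventions; a minimal hypothetical example of the shape (assuming the chai expect and Ember Em globals that the test harness provides; names are illustrative, not verbatim from the commit):

    // Hypothetical spec shape; mirrors the " Default" suffix logic used
    // for alert group display names elsewhere in this series.
    describe('App.AlertGroup', function () {
      it('appends " Default" to the display name of default groups', function () {
        var group = Em.Object.create({ name: 'ops', default: true });
        var displayName = group.get('default') ?
          group.get('name') + ' Default' : group.get('name');
        expect(displayName).to.equal('ops Default');
      });
    });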


http://git-wip-us.apache.org/repos/asf/ambari/blob/d7f1e8c0/ambari-web/app/assets/test/tests.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/test/tests.js b/ambari-web/app/assets/test/tests.js
index e42c811..d47d558 100644
--- a/ambari-web/app/assets/test/tests.js
+++ b/ambari-web/app/assets/test/tests.js
@@ -390,6 +390,7 @@ var files = [
   'test/models/alerts/alert_definition_test',
   'test/models/alerts/alert_group_test',
   'test/models/alerts/alert_instance_test',
+  'test/models/alerts/alert_notification_test',
   'test/models/authentication_test',
   'test/models/client_component_test',
   'test/models/cluster_states_test',

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7f1e8c0/ambari-web/app/controllers/main/alerts/definition_details_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/alerts/definition_details_controller.js b/ambari-web/app/controllers/main/alerts/definition_details_controller.js
index ea9b5b7..55d7e2b 100644
--- a/ambari-web/app/controllers/main/alerts/definition_details_controller.js
+++ b/ambari-web/app/controllers/main/alerts/definition_details_controller.js
@@ -105,10 +105,9 @@ App.MainAlertDefinitionDetailsController = Em.Controller.extend({
     var lastDayAlertsCount = {};
     data.items.forEach(function (alert) {
       if (!lastDayAlertsCount[alert.AlertHistory.host_name]) {
-        lastDayAlertsCount[alert.AlertHistory.host_name] = 1;
-      } else {
-        lastDayAlertsCount[alert.AlertHistory.host_name] += 1;
+        lastDayAlertsCount[alert.AlertHistory.host_name] = 0;
       }
+      lastDayAlertsCount[alert.AlertHistory.host_name]++;
     });
     this.set('lastDayAlertsCount', lastDayAlertsCount);
   },
@@ -149,14 +148,14 @@ App.MainAlertDefinitionDetailsController = Em.Controller.extend({
     element.set('isEditing', false);
 
     var data = Em.Object.create({});
-    var property_name = "AlertDefinition/" + element.get('name');
-    data.set(property_name, element.get('value'));
-    var alertDefinition_id = this.get('content.id');
+    var propertyName = "AlertDefinition/" + element.get('name');
+    data.set(propertyName, element.get('value'));
+    var alertDefinitionId = this.get('content.id');
     return App.ajax.send({
       name: 'alerts.update_alert_definition',
       sender: this,
       data: {
-        id: alertDefinition_id,
+        id: alertDefinitionId,
         data: data
       }
     });
@@ -180,10 +179,9 @@ App.MainAlertDefinitionDetailsController = Em.Controller.extend({
 
   /**
    * "Delete" button handler
-   * @param {object} event
    * @method deleteAlertDefinition
    */
-  deleteAlertDefinition: function (event) {
+  deleteAlertDefinition: function () {
     var alertDefinition = this.get('content');
     var self = this;
     App.showConfirmationPopup(function () {
@@ -229,7 +227,7 @@ App.MainAlertDefinitionDetailsController = Em.Controller.extend({
       confirmButton: alertDefinition.get('enabled') ? Em.I18n.t('alerts.table.state.enabled.confirm.btn') : Em.I18n.t('alerts.table.state.disabled.confirm.btn')
     });
 
-    return App.showConfirmationFeedBackPopup(function (query) {
+    return App.showConfirmationFeedBackPopup(function () {
       self.toggleDefinitionState(alertDefinition);
     }, bodyMessage);
   },
@@ -286,14 +284,14 @@ App.MainAlertDefinitionDetailsController = Em.Controller.extend({
       header: Em.I18n.t('alerts.actions.editRepeatTolerance.header'),
       primary: Em.I18n.t('common.save'),
       secondary: Em.I18n.t('common.cancel'),
-      inputValue: self.get('content.repeat_tolerance_enabled') ? (self.get('content.repeat_tolerance') || 1) : alertsRepeatTolerance,
+      inputValue: self.get('content.repeat_tolerance_enabled') ? self.get('content.repeat_tolerance') || 1 : alertsRepeatTolerance,
       errorMessage: Em.I18n.t('alerts.actions.editRepeatTolerance.error'),
       isInvalid: function () {
         var intValue = Number(this.get('inputValue'));
         return this.get('inputValue') !== 'DEBUG' && (!validator.isValidInt(intValue) || intValue < 1 || intValue > 99);
       }.property('inputValue'),
       isChanged: function () {
-        return Number(this.get('inputValue')) != alertsRepeatTolerance;
+        return Number(this.get('inputValue')) !== alertsRepeatTolerance;
       }.property('inputValue'),
       doRestoreDefaultValue: function () {
         this.set('inputValue', alertsRepeatTolerance);
@@ -306,7 +304,7 @@ App.MainAlertDefinitionDetailsController = Em.Controller.extend({
         }
         var input = this.get('inputValue');
         self.set('content.repeat_tolerance', input);
-        self.enableRepeatTolerance(input != alertsRepeatTolerance);
+        self.enableRepeatTolerance(input !== alertsRepeatTolerance);
         App.ajax.send({
           name: 'alerts.update_alert_definition',
           sender: self,
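
Among the cleanups in the hunks above: snake_case locals become camelCase, loose != comparisons become strict !==, and the per-host alert counter drops its if/else in favor of the initialize-then-increment idiom. That idiom in isolation (illustrative only, with hypothetical input data):

    // Same pattern as the per-host counter above, on made-up data.
    var counts = {};
    ['h1', 'h2', 'h1'].forEach(function (host) {
      if (!counts[host]) {
        counts[host] = 0;  // first sighting: initialize once
      }
      counts[host]++;      // then increment unconditionally
    });
    // counts -> { h1: 2, h2: 1 }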

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7f1e8c0/ambari-web/app/controllers/main/alerts/manage_alert_groups_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/alerts/manage_alert_groups_controller.js b/ambari-web/app/controllers/main/alerts/manage_alert_groups_controller.js
index a501a34..ea649b8 100644
--- a/ambari-web/app/controllers/main/alerts/manage_alert_groups_controller.js
+++ b/ambari-web/app/controllers/main/alerts/manage_alert_groups_controller.js
@@ -20,6 +20,35 @@ var App = require('app');
 
 var validator = require('utils/validator');
 
+function stringify(obj, property) {
+  return JSON.stringify(obj.get(property).slice().sort());
+}
+
+function groupsAreNotEqual(group1, group2) {
+  return stringify(group1, 'definitions') !== stringify(group2, 'definitions') ||
+  stringify(group1, 'notifications') !== stringify(group2, 'notifications');
+}
+
+function mapToEmObjects(collection, fields, renamedFields) {
+  var _renamedFields = arguments.length === 3 ? renamedFields : [];
+  return collection.map(function (item) {
+    var ret = Em.Object.create(Em.getProperties(item, fields));
+    _renamedFields.forEach(function (renamedField) {
+      var [realName, newName] = renamedField.split(':');
+      Em.set(ret, newName, Em.get(item, realName));
+    });
+    return ret;
+  });
+}
+
+var AlertGroupClone = Em.Object.extend({
+  displayName: function () {
+    var name = App.config.truncateGroupName(this.get('name'));
+    return this.get('default') ? name + ' Default' : name;
+  }.property('name', 'default'),
+  label: Em.computed.format('{0} ({1})', 'displayName', 'definitions.length')
+});
+
 App.ManageAlertGroupsController = Em.Controller.extend({
 
   name: 'manageAlertGroupsController',
@@ -60,15 +89,7 @@ App.ManageAlertGroupsController = Em.Controller.extend({
    * @type {App.AlertNotification[]}
    */
   alertNotifications: function () {
-    return this.get('isLoaded') ? App.AlertNotification.find().map(function (target) {
-      return Em.Object.create({
-        name: target.get('name'),
-        id: target.get('id'),
-        description: target.get('description'),
-        type: target.get('type'),
-        global: target.get('global')
-      });
-    }) : [];
+    return this.get('isLoaded') ? mapToEmObjects(App.AlertNotification.find(), ['id', 'name', 'description', 'type', 'global']) : [];
   }.property('isLoaded'),
 
   /**
@@ -97,8 +118,8 @@ App.ManageAlertGroupsController = Em.Controller.extend({
    */
   isDeleteDefinitionsDisabled: function () {
     var selectedGroup = this.get('selectedAlertGroup');
-    return selectedGroup ? (selectedGroup.default || this.get('selectedDefinitions').length === 0) : true;
-  }.property('selectedAlertGroup', 'selectedAlertGroup.definitions.length', 'selectedDefinitions.length'),
+    return selectedGroup ? selectedGroup.default || this.get('selectedDefinitions').length === 0 : true;
+  }.property('selectedAlertGroup.definitions.length', 'selectedDefinitions.length'),
 
   /**
    * observes if any group changed including: group name, newly created group, deleted group, group with definitions/notifications changed
@@ -120,7 +141,7 @@ App.ManageAlertGroupsController = Em.Controller.extend({
    */
   defsModifiedAlertGroupsObs: function() {
     Em.run.once(this, this.defsModifiedAlertGroupsObsOnce);
-  }.observes('selectedAlertGroup.definitions.@each', 'selectedAlertGroup.definitions.length', 'selectedAlertGroup.notifications.@each', 'selectedAlertGroup.notifications.length', 'alertGroups', 'isLoaded'),
+  }.observes('selectedAlertGroup.definitions.[]', 'selectedAlertGroup.notifications.[]', 'alertGroups', 'isLoaded'),
 
   /**
    * Update <code>defsModifiedAlertGroups</code>-value
@@ -130,7 +151,7 @@ App.ManageAlertGroupsController = Em.Controller.extend({
    */
   defsModifiedAlertGroupsObsOnce: function() {
     if (!this.get('isLoaded')) {
-      return false;
+      return;
     }
     var groupsToDelete = [];
     var groupsToSet = [];
@@ -147,12 +168,7 @@ App.ManageAlertGroupsController = Em.Controller.extend({
       var originalGroup = mappedOriginalGroups[group.get('id')];
       if (originalGroup) {
         // should update definitions or notifications
-        if (JSON.stringify(group.get('definitions').slice().sort()) !== JSON.stringify(originalGroup.get('definitions').slice().sort())
-          || JSON.stringify(group.get('notifications').slice().sort()) !== JSON.stringify(originalGroup.get('notifications').slice().sort())) {
-          groupsToSet.push(group.set('id', originalGroup.get('id')));
-        }
-        else
-        if (group.get('name') !== originalGroup.get('name')) {
+        if (groupsAreNotEqual(group, originalGroup) || group.get('name') !== originalGroup.get('name')) {
           // should update name
           groupsToSet.push(group.set('id', originalGroup.get('id')));
         }
@@ -222,47 +238,24 @@ App.ManageAlertGroupsController = Em.Controller.extend({
    */
   loadAlertGroups: function () {
     var alertGroups = App.AlertGroup.find().map(function (group) {
-      var definitions = group.get('definitions').map(function (def) {
-        return Em.Object.create({
-          name: def.get('name'),
-          serviceName: def.get('serviceName'),
-          componentName: def.get('componentName'),
-          serviceNameDisplay: def.get('service.displayName'),
-          componentNameDisplay: def.get('componentNameFormatted'),
-          label: def.get('label'),
-          id: def.get('id')
-        });
-      });
-
-      var targets = group.get('targets').map(function (target) {
-        return Em.Object.create({
-          name: target.get('name'),
-          id: target.get('id'),
-          description: target.get('description'),
-          type: target.get('type'),
-          global: target.get('global')
-        });
-      });
-
-      return Em.Object.create({
-        id: group.get('id'),
-        name: group.get('name'),
-        default: group.get('default'),
-        displayName: function () {
-          var name = App.config.truncateGroupName(this.get('name'));
-          return this.get('default') ? name + ' Default' : name;
-        }.property('name', 'default'),
-        label: Em.computed.format('{0} ({1})', 'displayName', 'definitions.length'),
-        definitions: definitions,
-        isAddDefinitionsDisabled: group.get('isAddDefinitionsDisabled'),
-        notifications: targets
-      });
+      var definitions = mapToEmObjects(
+        group.get('definitions'),
+        ['name', 'serviceName', 'componentName', 'label', 'id'],
+        ['service.displayName:serviceNameDisplay', 'componentNameFormatted:componentNameDisplay']
+      );
+
+      var targets = mapToEmObjects(group.get('targets'), ['name', 'id', 'description', 'type', 'global']);
+
+      var hash = Em.getProperties(group, ['id', 'name', 'default', 'isAddDefinitionsDisabled']);
+      hash.definitions = definitions;
+      hash.notifications = targets;
+      return AlertGroupClone.create(hash);
     });
     this.setProperties({
       alertGroups: alertGroups,
       isLoaded: true,
       originalAlertGroups: this.copyAlertGroups(alertGroups),
-      selectedAlertGroup: this.get('alertGroups')[0]
+      selectedAlertGroup: alertGroups[0]
     });
   },
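
A worked example of the clone shape built here (hypothetical values; assumes the name is short enough that App.config.truncateGroupName returns it unchanged, and def1..def3 stand for definition clones):

  var clone = AlertGroupClone.create({
    id: 1,
    name: 'HDFS',
    default: true,
    definitions: [def1, def2, def3],
    notifications: []
  });
  clone.get('displayName'); // 'HDFS Default'
  clone.get('label');       // 'HDFS Default (3)'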
 
@@ -332,22 +325,16 @@ App.ManageAlertGroupsController = Em.Controller.extend({
     selectedAlertGroup.get('definitions').forEach(function (def) {
       usedDefinitionsMap[def.name] = true;
     });
-    sharedDefinitions.forEach(function (shared_def) {
-      if (!usedDefinitionsMap[shared_def.get('name')]) {
-        availableDefinitions.pushObject(shared_def);
+    sharedDefinitions.forEach(function (sharedDef) {
+      if (!usedDefinitionsMap[sharedDef.get('name')]) {
+        availableDefinitions.pushObject(sharedDef);
       }
     });
-    return availableDefinitions.map(function (def) {
-      return Em.Object.create({
-        name: def.get('name'),
-        serviceName: def.get('serviceName'),
-        componentName: def.get('componentName'),
-        serviceNameDisplay: def.get('service.displayName'),
-        componentNameDisplay: def.get('componentNameFormatted'),
-        label: def.get('label'),
-        id: def.get('id')
-      });
-    });
+    return mapToEmObjects(
+      availableDefinitions,
+      ['name', 'serviceName', 'componentName', 'label', 'id'],
+      ['service.displayName:serviceNameDisplay', 'componentNameFormatted:componentNameDisplay']
+    );
   },
 
   /**
@@ -359,10 +346,6 @@ App.ManageAlertGroupsController = Em.Controller.extend({
       return false;
     }
     var availableDefinitions = this.getAvailableDefinitions(this.get('selectedAlertGroup'));
-    var popupDescription = {
-      header: Em.I18n.t('alerts.actions.manage_alert_groups_popup.selectDefsDialog.title'),
-      dialogMessage: Em.I18n.t('alerts.actions.manage_alert_groups_popup.selectDefsDialog.message').format(this.get('selectedAlertGroup.displayName'))
-    };
     var validComponents = App.StackServiceComponent.find().map(function (component) {
       return Em.Object.create({
         componentName: component.get('componentName'),
@@ -377,7 +360,7 @@ App.ManageAlertGroupsController = Em.Controller.extend({
         selected: false
       });
     });
-    this.launchDefsSelectionDialog(availableDefinitions, [], validServices, validComponents, this.addDefinitionsCallback.bind(this), popupDescription);
+    return this.launchDefsSelectionDialog(availableDefinitions, [], validServices, validComponents);
   },
 
   /**
@@ -385,20 +368,20 @@ App.ManageAlertGroupsController = Em.Controller.extend({
    * @method launchDefsSelectionDialog
    * @return {App.ModalPopup}
    */
-  launchDefsSelectionDialog: function (initialDefs, selectedDefs, validServices, validComponents, callback, popupDescription) {
-
+  launchDefsSelectionDialog: function (initialDefs, selectedDefs, validServices, validComponents) {
+    var self = this;
     return App.ModalPopup.show({
 
       classNames: [ 'common-modal-wrapper' ],
 
       modalDialogClasses: ['modal-lg'],
 
-      header: popupDescription.header,
+      header: Em.I18n.t('alerts.actions.manage_alert_groups_popup.selectDefsDialog.title'),
 
       /**
        * @type {string}
        */
-      dialogMessage: popupDescription.dialogMessage,
+      dialogMessage: Em.I18n.t('alerts.actions.manage_alert_groups_popup.selectDefsDialog.message').format(this.get('selectedAlertGroup.displayName')),
 
       /**
        * @type {string|null}
@@ -417,7 +400,7 @@ App.ManageAlertGroupsController = Em.Controller.extend({
           this.set('warningMessage', Em.I18n.t('alerts.actions.manage_alert_groups_popup.selectDefsDialog.message.warning'));
           return;
         }
-        callback(arrayOfSelectedDefs);
+        self.addDefinitionsCallback(arrayOfSelectedDefs);
         this.hide();
       },
 
@@ -428,7 +411,7 @@ App.ManageAlertGroupsController = Em.Controller.extend({
       disablePrimary: Em.computed.not('isLoaded'),
 
       onSecondary: function () {
-        callback(null);
+        self.addDefinitionsCallback(null);
         this.hide();
       },
 
@@ -481,7 +464,7 @@ App.ManageAlertGroupsController = Em.Controller.extend({
   postNewAlertGroup: function (newAlertGroupData, callback) {
     // create a new group with name , definition and notifications
     var data = {
-      'name': newAlertGroupData.get('name')
+      name: newAlertGroupData.get('name')
     };
     if (newAlertGroupData.get('definitions').length > 0) {
       data.definitions = newAlertGroupData.get('definitions').mapProperty('id');
@@ -523,10 +506,10 @@ App.ManageAlertGroupsController = Em.Controller.extend({
     var sendData = {
       name: 'alert_groups.update',
       data: {
-        "group_id": alertGroup.id,
-        'name': alertGroup.get('name'),
-        'definitions': alertGroup.get('definitions').mapProperty('id'),
-        'targets': alertGroup.get('notifications').mapProperty('id')
+        group_id: alertGroup.id,
+        name: alertGroup.get('name'),
+        definitions: alertGroup.get('definitions').mapProperty('id'),
+        targets: alertGroup.get('notifications').mapProperty('id')
       },
       success: 'successFunction',
       error: 'errorFunction',
@@ -556,7 +539,7 @@ App.ManageAlertGroupsController = Em.Controller.extend({
     var sendData = {
       name: 'alert_groups.delete',
       data: {
-        "group_id": alertGroup.id
+        group_id: alertGroup.id
       },
       success: 'successFunction',
       error: 'errorFunction',
@@ -631,24 +614,19 @@ App.ManageAlertGroupsController = Em.Controller.extend({
        * @type {string|null}
        */
       warningMessage: function () {
-        var warningMessage = '';
         var originalGroup = self.get('selectedAlertGroup');
         var groupName = this.get('alertGroupName').trim();
 
         if (originalGroup.get('name').trim() === groupName) {
-          warningMessage = Em.I18n.t("alerts.actions.manage_alert_groups_popup.addGroup.exist");
+          return Em.I18n.t("alerts.actions.manage_alert_groups_popup.addGroup.exist");
+        }
+        if (self.get('alertGroups').mapProperty('displayName').contains(groupName)) {
+          return Em.I18n.t("alerts.actions.manage_alert_groups_popup.addGroup.exist");
         }
-        else {
-          if (self.get('alertGroups').mapProperty('displayName').contains(groupName)) {
-            warningMessage = Em.I18n.t("alerts.actions.manage_alert_groups_popup.addGroup.exist");
-          }
-          else {
-            if (groupName && !validator.isValidAlertGroupName(groupName)) {
-              warningMessage = Em.I18n.t("form.validator.alertGroupName");
-            }
-          }
+        if (groupName && !validator.isValidAlertGroupName(groupName)) {
+          return Em.I18n.t("form.validator.alertGroupName");
         }
-        return warningMessage;
+        return '';
       }.property('alertGroupName'),
 
       /**
@@ -672,7 +650,7 @@ App.ManageAlertGroupsController = Em.Controller.extend({
    * @method addAlertGroup
    */
   addAlertGroup: function (duplicated) {
-    duplicated = (duplicated === true);
+    duplicated = duplicated === true;
     var self = this;
     var popup = App.ModalPopup.show({
 
@@ -696,17 +674,14 @@ App.ManageAlertGroupsController = Em.Controller.extend({
        * @type {string}
        */
       warningMessage: function () {
-        var warningMessage = '';
         var groupName = this.get('alertGroupName').trim();
         if (self.get('alertGroups').mapProperty('displayName').contains(groupName)) {
-          warningMessage = Em.I18n.t("alerts.actions.manage_alert_groups_popup.addGroup.exist");
+          return Em.I18n.t("alerts.actions.manage_alert_groups_popup.addGroup.exist");
         }
-        else {
-          if (groupName && !validator.isValidAlertGroupName(groupName)) {
-            warningMessage = Em.I18n.t("form.validator.alertGroupName");
-          }
+        if (groupName && !validator.isValidAlertGroupName(groupName)) {
+          return Em.I18n.t("form.validator.alertGroupName");
         }
-        return warningMessage;
+        return '';
       }.property('alertGroupName'),
 
       /**
@@ -716,14 +691,9 @@ App.ManageAlertGroupsController = Em.Controller.extend({
       disablePrimary: Em.computed.or('alertGroupNameIsEmpty', 'warningMessage'),
 
       onPrimary: function () {
-        var newAlertGroup = Em.Object.create({
+        var newAlertGroup = AlertGroupClone.create({
           name: this.get('alertGroupName').trim(),
           default: false,
-          displayName: function () {
-            var name = App.config.truncateGroupName(this.get('name'));
-            return this.get('default') ? name + ' Default' : name;
-          }.property('name', 'default'),
-          label: Em.computed.format('{0} ({1})', 'displayName', 'definitions.length'),
           definitions: duplicated ? self.get('selectedAlertGroup.definitions').slice(0) : [],
           notifications: self.get('alertGlobalNotifications'),
           isAddDefinitionsDisabled: false

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7f1e8c0/ambari-web/app/controllers/main/alerts/manage_alert_notifications_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/alerts/manage_alert_notifications_controller.js b/ambari-web/app/controllers/main/alerts/manage_alert_notifications_controller.js
index 8501678..f470f08 100644
--- a/ambari-web/app/controllers/main/alerts/manage_alert_notifications_controller.js
+++ b/ambari-web/app/controllers/main/alerts/manage_alert_notifications_controller.js
@@ -211,22 +211,23 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
   newCustomProperty: {name: '', value: ''},
 
   /**
+   * @type {string[]}
+   */
+  customPropertyNames: Em.computed.mapBy('inputFields.customProperties', 'name'),
+
+  /**
    * Check if custom property name exists in the <code>inputFields.customProperties</code>
    *
    * @type {boolean}
    */
-  isNewCustomPropertyExists: function () {
-    return this.get('inputFields.customProperties').mapProperty('name').contains(this.get('newCustomProperty.name'));
-  }.property('newCustomProperty.name'),
+  isNewCustomPropertyExists: Em.computed.existsInByKey('newCustomProperty.name', 'customPropertyNames'),
 
   /**
    * Check if custom property name exists in the <code>ignoredCustomProperties</code>
    *
    * @type {boolean}
    */
-  isNewCustomPropertyIgnored: function () {
-    return this.get('ignoredCustomProperties').contains(this.get('newCustomProperty.name'));
-  }.property('newCustomProperty.name'),
+  isNewCustomPropertyIgnored: Em.computed.existsInByKey('newCustomProperty.name', 'ignoredCustomProperties'),
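
Em.computed.existsInByKey is one of Ambari's own computed macros; a rough equivalent of what the two one-liners above expand to, mirroring the removed bodies (sketch only):

  isNewCustomPropertyExists: function () {
    return this.get('customPropertyNames').contains(this.get('newCustomProperty.name'));
  }.property('newCustomProperty.name', 'customPropertyNames.[]'),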
 
   /**
    * Check if custom property name is valid according to the <code>validator.isValidConfigKey</code>
@@ -403,24 +404,25 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
     var inputFields = this.get('inputFields');
     var selectedAlertNotification = this.get('selectedAlertNotification');
     var methodValue = this.getNotificationTypeText(selectedAlertNotification.get('type'));
+    var properties = selectedAlertNotification.get('properties');
     inputFields.set('name.value', (addCopyToName ? 'Copy of ' : '') + selectedAlertNotification.get('name'));
     inputFields.set('groups.value', selectedAlertNotification.get('groups').toArray());
-    inputFields.set('email.value', selectedAlertNotification.get('properties')['ambari.dispatch.recipients'] ?
-      selectedAlertNotification.get('properties')['ambari.dispatch.recipients'].join(', ') : '');
-    inputFields.set('SMTPServer.value', selectedAlertNotification.get('properties')['mail.smtp.host']);
-    inputFields.set('SMTPPort.value', selectedAlertNotification.get('properties')['mail.smtp.port']);
-    inputFields.set('SMTPUseAuthentication.value', selectedAlertNotification.get('properties')['mail.smtp.auth'] !== "false");
-    inputFields.set('SMTPUsername.value', selectedAlertNotification.get('properties')['ambari.dispatch.credential.username']);
-    inputFields.set('SMTPPassword.value', selectedAlertNotification.get('properties')['ambari.dispatch.credential.password']);
-    inputFields.set('retypeSMTPPassword.value', selectedAlertNotification.get('properties')['ambari.dispatch.credential.password']);
-    inputFields.set('SMTPSTARTTLS.value', selectedAlertNotification.get('properties')['mail.smtp.starttls.enable'] !== "false");
-    inputFields.set('emailFrom.value', selectedAlertNotification.get('properties')['mail.smtp.from']);
-    inputFields.set('version.value', selectedAlertNotification.get('properties')['ambari.dispatch.snmp.version']);
-    inputFields.set('OIDs.value', selectedAlertNotification.get('properties')['ambari.dispatch.snmp.oids.trap']);
-    inputFields.set('community.value', selectedAlertNotification.get('properties')['ambari.dispatch.snmp.community']);
-    inputFields.set('host.value', selectedAlertNotification.get('properties')['ambari.dispatch.recipients'] ?
-      selectedAlertNotification.get('properties')['ambari.dispatch.recipients'].join(', ') : '');
-    inputFields.set('port.value', selectedAlertNotification.get('properties')['ambari.dispatch.snmp.port']);
+    inputFields.set('email.value', properties['ambari.dispatch.recipients'] ?
+      properties['ambari.dispatch.recipients'].join(', ') : '');
+    inputFields.set('SMTPServer.value', properties['mail.smtp.host']);
+    inputFields.set('SMTPPort.value', properties['mail.smtp.port']);
+    inputFields.set('SMTPUseAuthentication.value', properties['mail.smtp.auth'] !== "false");
+    inputFields.set('SMTPUsername.value', properties['ambari.dispatch.credential.username']);
+    inputFields.set('SMTPPassword.value', properties['ambari.dispatch.credential.password']);
+    inputFields.set('retypeSMTPPassword.value', properties['ambari.dispatch.credential.password']);
+    inputFields.set('SMTPSTARTTLS.value', properties['mail.smtp.starttls.enable'] !== "false");
+    inputFields.set('emailFrom.value', properties['mail.smtp.from']);
+    inputFields.set('version.value', properties['ambari.dispatch.snmp.version']);
+    inputFields.set('OIDs.value', properties['ambari.dispatch.snmp.oids.trap']);
+    inputFields.set('community.value', properties['ambari.dispatch.snmp.community']);
+    inputFields.set('host.value', properties['ambari.dispatch.recipients'] ?
+      properties['ambari.dispatch.recipients'].join(', ') : '');
+    inputFields.set('port.value', properties['ambari.dispatch.snmp.port']);
     inputFields.set('severityFilter.value', selectedAlertNotification.get('alertStates'));
     inputFields.set('global.value', selectedAlertNotification.get('global'));
     inputFields.set('allGroups.value', selectedAlertNotification.get('global') ? 'all' : 'custom');
@@ -429,7 +431,6 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
     inputFields.set('description.value', selectedAlertNotification.get('description'));
     inputFields.set('method.value', methodValue);
     inputFields.get('customProperties').clear();
-    var properties = selectedAlertNotification.get('properties');
     var ignoredCustomProperties = this.get('ignoredCustomProperties');
     Em.keys(properties).forEach(function (k) {
       if (ignoredCustomProperties.contains(k)) return;
@@ -502,34 +503,18 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
           if (newName && !this.get('currentName')) {
             this.set('currentName', newName);
           }
-          if (isEdit) {
-            // edit current alert notification
-            if (!newName) {
-              this.set('nameError', true);
-              errorMessage = Em.I18n.t('alerts.actions.manage_alert_notifications_popup.error.name.empty');
-            } else if (newName && newName !== this.get('currentName') && self.get('alertNotifications').mapProperty('name').contains(newName)) {
-              this.set('nameError', true);
-              errorMessage = Em.I18n.t('alerts.actions.manage_alert_notifications_popup.error.name.existed');
-            } else if (newName && !validator.isValidAlertNotificationName(newName)){
-              this.set('nameError', true);
-              errorMessage = Em.I18n.t('form.validator.alertNotificationName');
-            } else {
-              this.set('nameError', false);
-            }
+          var nameExistsCondition = isEdit ? newName && newName !== this.get('currentName') : !!newName;
+          if (!newName) {
+            this.set('nameError', true);
+            errorMessage = Em.I18n.t('alerts.actions.manage_alert_notifications_popup.error.name.empty');
+          } else if (nameExistsCondition && self.get('alertNotifications').mapProperty('name').contains(newName)) {
+            this.set('nameError', true);
+            errorMessage = Em.I18n.t('alerts.actions.manage_alert_notifications_popup.error.name.existed');
+          } else if (newName && !validator.isValidAlertNotificationName(newName)){
+            this.set('nameError', true);
+            errorMessage = Em.I18n.t('form.validator.alertNotificationName');
           } else {
-            // add new alert notification
-            if (!newName) {
-              this.set('nameError', true);
-              errorMessage = Em.I18n.t('alerts.actions.manage_alert_notifications_popup.error.name.empty');
-            } else if (newName && self.get('alertNotifications').mapProperty('name').contains(newName)) {
-              this.set('nameError', true);
-              errorMessage = Em.I18n.t('alerts.actions.manage_alert_notifications_popup.error.name.existed');
-            } else if (newName && !validator.isValidAlertNotificationName(newName)){
-              this.set('nameError', true);
-              errorMessage = Em.I18n.t('form.validator.alertNotificationName');
-            } else {
-              this.set('nameError', false);
-            }
+            this.set('nameError', false);
           }
           this.set('controller.inputFields.name.errorMsg', errorMessage);
         }.observes('controller.inputFields.name.value'),
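
A concrete trace of the consolidated check (hypothetical values): when editing without renaming, isEdit is true and newName equals currentName, so nameExistsCondition is false and the duplicate-name branch is skipped even though the unchanged name is still present in alertNotifications; when adding, nameExistsCondition reduces to !!newName, matching the old add branch.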
@@ -656,7 +641,7 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
          * @method selectAllGroups
          */
         selectAllGroups: function () {
-          if (this.get('controller.inputFields.allGroups.value') == 'custom') {
+          if (this.get('controller.inputFields.allGroups.value') === 'custom') {
             this.set('groupSelect.selection', this.get('groupSelect.content').slice());
           }
         },
@@ -666,7 +651,7 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
          * @method clearAllGroups
          */
         clearAllGroups: function () {
-          if (this.get('controller.inputFields.allGroups.value') == 'custom') {
+          if (this.get('controller.inputFields.allGroups.value') === 'custom') {
             this.set('groupSelect.selection', []);
           }
         },
@@ -737,6 +722,10 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
         } else {
           self.createAlertNotification(apiObject);
         }
+      },
+      hide: function () {
+        self.set('createEditPopup', null);
+        return this._super(...arguments);
       }
     });
     this.set('createEditPopup', createEditPopup);
@@ -797,9 +786,9 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
 
   getNotificationType: function(text) {
     var notificationType = text;
-    if(notificationType == "Custom SNMP") {
+    if(notificationType === "Custom SNMP") {
       notificationType = "SNMP";
-    } else if(notificationType == "SNMP") {
+    } else if(notificationType === "SNMP") {
       notificationType = "AMBARI_SNMP";
     }
     return notificationType;
@@ -807,9 +796,9 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
 
   getNotificationTypeText: function(notificationType) {
     var notificationTypeText = notificationType;
-    if(notificationType == "SNMP") {
+    if(notificationType === "SNMP") {
       notificationTypeText = "Custom SNMP";
-    } else if(notificationType == "AMBARI_SNMP") {
+    } else if(notificationType === "AMBARI_SNMP") {
       notificationTypeText = "SNMP";
     }
     return notificationTypeText;
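
The two helpers form an asymmetric mapping between UI labels and API types (behavior unchanged by this commit, summarized here for reference):

  // UI label          API type
  // 'Custom SNMP' <-> 'SNMP'
  // 'SNMP'        <-> 'AMBARI_SNMP'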
@@ -936,14 +925,13 @@ App.ManageAlertNotificationsController = Em.Controller.extend({
    * @method enableOrDisableAlertNotification
    */
   enableOrDisableAlertNotification: function (e) {
-    var enabled = (e.context === "disable")?false:true;
     return App.ajax.send({
       name: 'alerts.update_alert_notification',
       sender: this,
       data: {
         data: {
-          "AlertTarget": {
-            "enabled": enabled
+          AlertTarget: {
+            enabled: e.context !== "disable"
           }
         },
         id: this.get('selectedAlertNotification.id')

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7f1e8c0/ambari-web/app/mappers/alert_definitions_mapper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mappers/alert_definitions_mapper.js b/ambari-web/app/mappers/alert_definitions_mapper.js
index cd4e738..c400b89 100644
--- a/ambari-web/app/mappers/alert_definitions_mapper.js
+++ b/ambari-web/app/mappers/alert_definitions_mapper.js
@@ -125,19 +125,11 @@ App.alertDefinitionsMapper = App.QuickDataMapper.create({
 
         var convertedParameters = [];
         var sourceParameters = item.AlertDefinition.source.parameters;
-        if (Array.isArray(sourceParameters)) {
+        if (Ember.isArray(sourceParameters)) {
           sourceParameters.forEach(function (parameter) {
-            convertedParameters.push({
-              id: item.AlertDefinition.id + parameter.name,
-              name: parameter.name,
-              display_name: parameter.display_name,
-              units: parameter.units,
-              value: parameter.value,
-              description: parameter.description,
-              type: parameter.type,
-              threshold: parameter.threshold,
-              visibility: parameter.visibility
-            });
+            let hash = Em.getProperties(parameter, ['name', 'display_name', 'units', 'value', 'description', 'type', 'threshold', 'visibility']);
+            hash.id = item.AlertDefinition.id + parameter.name;
+            convertedParameters.push(hash);
           });
         }
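
Em.getProperties(obj, keys) returns a plain hash containing only the listed keys, so the loop builds the same parameter objects as before with id attached afterwards. A minimal illustration (hypothetical parameter):

  Em.getProperties({name: 'p1', units: 'ms', extra: true}, ['name', 'units']);
  // => {name: 'p1', units: 'ms'}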
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7f1e8c0/ambari-web/app/models/alerts/alert_config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/alerts/alert_config.js b/ambari-web/app/models/alerts/alert_config.js
index 1af02fc..a951826 100644
--- a/ambari-web/app/models/alerts/alert_config.js
+++ b/ambari-web/app/models/alerts/alert_config.js
@@ -179,7 +179,7 @@ App.AlertConfigProperties = {
     displayType: 'select',
     apiProperty: 'service_name',
     apiFormattedValue: function () {
-      return this.get('value') == 'CUSTOM' ? this.get('value') : App.StackService.find().findProperty('displayName', this.get('value')).get('serviceName');
+      return this.get('value') === 'CUSTOM' ? this.get('value') : App.StackService.find().findProperty('displayName', this.get('value')).get('serviceName');
     }.property('value'),
     change: function () {
       this.set('property.value', true);
@@ -193,7 +193,7 @@ App.AlertConfigProperties = {
     displayType: 'select',
     apiProperty: 'component_name',
     apiFormattedValue: function () {
-      return this.get('value') == 'No component' ? this.get('value') : App.StackServiceComponent.find().findProperty('displayName', this.get('value')).get('componentName');
+      return this.get('value') === 'No component' ? this.get('value') : App.StackServiceComponent.find().findProperty('displayName', this.get('value')).get('componentName');
     }.property('value')
   }),
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7f1e8c0/ambari-web/app/models/alerts/alert_group.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/alerts/alert_group.js b/ambari-web/app/models/alerts/alert_group.js
index 4f40ccf..49a850f 100644
--- a/ambari-web/app/models/alerts/alert_group.js
+++ b/ambari-web/app/models/alerts/alert_group.js
@@ -56,7 +56,7 @@ App.AlertGroup = DS.Model.extend({
    */
   displayName: function () {
     var name = App.config.truncateGroupName(this.get('name'));
-    return this.get('default') ? (name + ' Default') : name;
+    return this.get('default') ? name + ' Default' : name;
   }.property('name', 'default'),
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7f1e8c0/ambari-web/app/models/alerts/alert_notification.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/alerts/alert_notification.js b/ambari-web/app/models/alerts/alert_notification.js
index e503c6b..edfbff9 100644
--- a/ambari-web/app/models/alerts/alert_notification.js
+++ b/ambari-web/app/models/alerts/alert_notification.js
@@ -26,8 +26,8 @@ App.AlertNotification = DS.Model.extend({
   groups: DS.hasMany('App.AlertGroup'),
   global: DS.attr('boolean'),
   enabled: DS.attr('boolean'),
-  displayName: Ember.computed('enabled', function() {
-    return (this.get('enabled') === true)?this.get('name'): this.get('name') + ' (Disabled)';
+  displayName: Ember.computed('enabled', 'name', function() {
+    return this.get('name') + (this.get('enabled') === true ? '' : ' (Disabled)');
   }),
 
   properties: {},

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7f1e8c0/ambari-web/test/controllers/main/alerts/manage_alert_groups_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/alerts/manage_alert_groups_controller_test.js b/ambari-web/test/controllers/main/alerts/manage_alert_groups_controller_test.js
index 45c2a4c..2c58017 100644
--- a/ambari-web/test/controllers/main/alerts/manage_alert_groups_controller_test.js
+++ b/ambari-web/test/controllers/main/alerts/manage_alert_groups_controller_test.js
@@ -202,5 +202,62 @@ describe('App.ManageAlertGroupsController', function () {
 
   });
 
+  App.TestAliases.testAsComputedAnd(getController(), 'isDefsModified', ['isLoaded', 'isDefsModifiedAlertGroups']);
+
+  App.TestAliases.testAsComputedOr(getController(), 'isDefsModifiedAlertGroups', ['defsModifiedAlertGroups.toSet.length', 'defsModifiedAlertGroups.toCreate.length', 'defsModifiedAlertGroups.toDelete.length']);
+
+  describe('#addAlertGroup', function () {
+
+    function getAddGroupPopup() {
+      var c = getController();
+      c.addAlertGroup();
+      return c.get('addGroupPopup');
+    }
+
+    App.TestAliases.testAsComputedOr(getAddGroupPopup(), 'disablePrimary', ['alertGroupNameIsEmpty', 'warningMessage']);
+
+  });
+
+  describe('#renameAlertGroup', function () {
+
+    function getRenamePopup() {
+      var c = getController();
+      c.renameAlertGroup();
+      return c.get('renameGroupPopup');
+    }
+
+    App.TestAliases.testAsComputedOr(getRenamePopup(), 'disablePrimary', ['alertGroupNameIsEmpty', 'warningMessage']);
+
+  });
+
+  describe('#alertNotifications', function () {
+    var alertNotifications;
+    beforeEach(function () {
+      sinon.stub(App.AlertNotification, 'find').returns([
+        Em.Object.create({id: 1, name: 'n1', description: 'n1d', type: 'EMAIL', global: true}),
+        Em.Object.create({id: 2, name: 'n2', description: 'n2d', type: 'SNMP', global: false})
+      ]);
+      manageAlertGroupsController.set('isLoaded', true);
+      alertNotifications = manageAlertGroupsController.get('alertNotifications');
+    });
+
+    afterEach(function () {
+      App.AlertNotification.find.restore();
+    });
+
+    it('should be mapped from App.AlertNotification (1)', function () {
+      expect(alertNotifications).to.have.property('length').to.be.equal(2);
+    });
+
+    it('should be mapped from App.AlertNotification (2)', function () {
+      expect(JSON.parse(JSON.stringify(alertNotifications[0]))).to.be.eql({id: 1, name: 'n1', description: 'n1d', type: 'EMAIL', global: true});
+    });
+
+    it('should be mapped from App.AlertNotification (3)', function () {
+      expect(JSON.parse(JSON.stringify(alertNotifications[1]))).to.be.eql({id: 2, name: 'n2', description: 'n2d', type: 'SNMP', global: false});
+    });
+
+  });
+
 });
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7f1e8c0/ambari-web/test/controllers/main/alerts/manage_alert_notifications_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/alerts/manage_alert_notifications_controller_test.js b/ambari-web/test/controllers/main/alerts/manage_alert_notifications_controller_test.js
index d63dcb8..15b4da3 100644
--- a/ambari-web/test/controllers/main/alerts/manage_alert_notifications_controller_test.js
+++ b/ambari-web/test/controllers/main/alerts/manage_alert_notifications_controller_test.js
@@ -1022,6 +1022,10 @@ describe('App.ManageAlertNotificationsController', function () {
 
       });
 
+      App.TestAliases.testAsComputedEqualProperties(getBodyClass(), 'allGroupsSelected', 'groupSelect.selection.length', 'groupSelect.content.length');
+
+      App.TestAliases.testAsComputedEqualProperties(getBodyClass(), 'allSeveritySelected', 'severitySelect.selection.length', 'severitySelect.content.length');
+
     });
 
   });
@@ -1330,4 +1334,10 @@ describe('App.ManageAlertNotificationsController', function () {
 
   });
 
+  App.TestAliases.testAsComputedMapBy(getController(), 'customPropertyNames', 'inputFields.customProperties', 'name');
+
+  App.TestAliases.testAsComputedExistsInByKey(getController(), 'isNewCustomPropertyExists', 'newCustomProperty.name', 'customPropertyNames', ['customA', 'customB']);
+
+  App.TestAliases.testAsComputedExistsInByKey(getController(), 'isNewCustomPropertyIgnored', 'newCustomProperty.name', 'ignoredCustomProperties', ['customA', 'customB']);
+
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7f1e8c0/ambari-web/test/models/alerts/alert_config_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/models/alerts/alert_config_test.js b/ambari-web/test/models/alerts/alert_config_test.js
index 74a91f4..78a6b59 100644
--- a/ambari-web/test/models/alerts/alert_config_test.js
+++ b/ambari-web/test/models/alerts/alert_config_test.js
@@ -22,6 +22,18 @@ require('models/alerts/alert_config');
 
 var model;
 
+describe('App.AlertConfigProperty', function () {
+
+  function getConfigProperty() {
+    return App.AlertConfigProperty.create();
+  }
+
+  App.TestAliases.testAsComputedNotExistsIn(getConfigProperty(), 'isPreLabeled', 'displayType', ['radioButton']);
+
+  App.TestAliases.testAsComputedAlias(getConfigProperty(), 'apiFormattedValue', 'value');
+
+});
+
 describe('App.AlertConfigProperties', function () {
 
   describe('Parameter', function () {
@@ -399,4 +411,13 @@ describe('App.AlertConfigProperties', function () {
 
   });
 
+  describe('App.AlertConfigProperties.FormatString', function () {
+    function getFormatStringConfig() {
+      return App.AlertConfigProperties.FormatString.create();
+    }
+
+    App.TestAliases.testAsComputedIfThenElse(getFormatStringConfig(), 'apiProperty', 'isJMXMetric', 'source.jmx.value', 'source.ganglia.value');
+
+  });
+
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7f1e8c0/ambari-web/test/models/alerts/alert_group_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/models/alerts/alert_group_test.js b/ambari-web/test/models/alerts/alert_group_test.js
index 036a32d..6f64b7a 100644
--- a/ambari-web/test/models/alerts/alert_group_test.js
+++ b/ambari-web/test/models/alerts/alert_group_test.js
@@ -22,8 +22,33 @@ function getModel() {
   return App.AlertGroup.createRecord();
 }
 
+var model;
+
 describe('App.AlertGroup', function() {
 
+  beforeEach(function () {
+    model = getModel();
+  });
+
   App.TestAliases.testAsComputedAlias(getModel(), 'isAddDefinitionsDisabled', 'default', 'boolean');
 
+  describe('#displayName', function () {
+
+    [
+      {name: 'abc', default: true, e: 'abc Default'},
+      {name: 'abc', default: false, e: 'abc'},
+      {name: '12345678901234567890', default: true, e: '123456789...234567890 Default'},
+      {name: '12345678901234567890', default: false, e: '123456789...234567890'},
+    ].forEach(function (test) {
+      it(test.name + ' ' + test.default, function () {
+        model.setProperties({
+          name: test.name,
+          default: test.default
+        });
+        expect(model.get('displayName')).to.be.equal(test.e);
+      });
+    });
+
+  });
+
 });
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7f1e8c0/ambari-web/test/models/alerts/alert_notification_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/models/alerts/alert_notification_test.js b/ambari-web/test/models/alerts/alert_notification_test.js
new file mode 100644
index 0000000..a03fd4e
--- /dev/null
+++ b/ambari-web/test/models/alerts/alert_notification_test.js
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+
+var model;
+
+function getModel() {
+  return App.AlertNotification.createRecord();
+}
+
+describe('App.AlertNotification', function () {
+
+  beforeEach(function () {
+    model = getModel();
+  });
+
+  describe('#displayName', function () {
+
+    [
+      {name: 'abc', enabled: true, e: 'abc'},
+      {name: 'abc', enabled: false, e: 'abc (Disabled)'},
+    ].forEach(function (test) {
+      it(test.name + ' ' + test.enabled, function () {
+        model.setProperties({
+          name: test.name,
+          enabled: test.enabled
+        });
+        expect(model.get('displayName')).to.be.equal(test.e);
+      });
+    });
+
+  });
+
+});


[21/50] [abbrv] ambari git commit: AMBARI-19576 - Downgrade request fails as existing Upgrade request is considered 'in progress' (rzang)

Posted by nc...@apache.org.
AMBARI-19576 - Downgrade request fails as existing Upgrade request is considered 'in progress' (rzang)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/37baf9a1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/37baf9a1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/37baf9a1

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 37baf9a1a96fe3d668666b12e9cc6f65e2f64439
Parents: 6fa54ae
Author: Richard Zang <rz...@apache.org>
Authored: Tue Jan 17 11:49:23 2017 -0800
Committer: Richard Zang <rz...@apache.org>
Committed: Tue Jan 17 11:50:33 2017 -0800

----------------------------------------------------------------------
 .../controllers/main/admin/stack_and_upgrade_controller.js    | 7 ++++++-
 .../main/admin/stack_and_upgrade_controller_test.js           | 4 ----
 2 files changed, 6 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/37baf9a1/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
index 93b9f1f..4f88d2f 100644
--- a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
+++ b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
@@ -677,7 +677,12 @@ App.MainAdminStackAndUpgradeController = Em.Controller.extend(App.LocalStorage,
     var self = this;
     this.set('requestInProgress', true);
     this.abortUpgrade().done(function() {
-      self.startDowngrade(currentVersion);
+      var interval = setInterval(function() {
+        if (self.get('upgradeData.Upgrade.request_status') === 'ABORTED') {
+          clearInterval(interval);
+          self.startDowngrade(currentVersion);
+        }
+      }, 1000);
     });
   },
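
The fix polls upgradeData until the abort is reflected as ABORTED before issuing the downgrade. A defensive variant of the same idea (a sketch, not the committed code; the attempt cap and its fallback are assumptions):

  var attempts = 0;
  var interval = setInterval(function () {
    if (self.get('upgradeData.Upgrade.request_status') === 'ABORTED') {
      clearInterval(interval);
      self.startDowngrade(currentVersion);
    } else if (++attempts >= 60) { // stop after ~60s instead of polling forever
      clearInterval(interval);
      self.set('requestInProgress', false);
    }
  }, 1000);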
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/37baf9a1/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js b/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
index 3acf1dd..4e93fd6 100644
--- a/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
+++ b/ambari-web/test/controllers/main/admin/stack_and_upgrade_controller_test.js
@@ -1110,10 +1110,6 @@ describe('App.MainAdminStackAndUpgradeController', function() {
       expect(controller.abortUpgrade.calledOnce).to.be.true;
     });
 
-    it('should run startDowngrade on done', function() {
-      expect(controller.startDowngrade.calledWith('versionInfo')).to.be.true;
-    });
-
   });
 
   describe("#startDowngrade()", function() {


[08/50] [abbrv] ambari git commit: AMBARI-19572 Move Master and HA wizards for all components should show config changes that will be done as part of the wizard. (ababiichuk)

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/cba69d93/ambari-web/test/controllers/main/service/reassign/step3_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/service/reassign/step3_controller_test.js b/ambari-web/test/controllers/main/service/reassign/step3_controller_test.js
index e433f47..203d162 100644
--- a/ambari-web/test/controllers/main/service/reassign/step3_controller_test.js
+++ b/ambari-web/test/controllers/main/service/reassign/step3_controller_test.js
@@ -18,6 +18,8 @@
 
 var App = require('app');
 require('controllers/main/service/reassign/step3_controller');
+require('controllers/main/service/reassign_controller');
+var testHelpers = require('test/helpers');
 var controller;
 
 describe('App.ReassignMasterWizardStep3Controller', function () {
@@ -50,4 +52,636 @@ describe('App.ReassignMasterWizardStep3Controller', function () {
       expect(App.router.send.calledWith("next")).to.be.true;
     });
   });
+
+  describe('#setAdditionalConfigs()', function () {
+
+    beforeEach(function () {
+      sinon.stub(App, 'get').withArgs('isHaEnabled').returns(true);
+    });
+
+    afterEach(function () {
+      App.get.restore();
+    });
+
+    it('Component is absent', function () {
+      controller.set('additionalConfigsMap', []);
+      var configs = {};
+
+      expect(controller.setAdditionalConfigs(configs, 'COMP1', '')).to.be.false;
+      expect(configs).to.eql({});
+    });
+
+    it('configs for Hadoop 2 is present', function () {
+      controller.set('additionalConfigsMap', [
+        {
+          componentName: 'COMP1',
+          configs: {
+            'test-site': {
+              'property1': '<replace-value>:1111'
+            }
+          },
+          configs_Hadoop2: {
+            'test-site': {
+              'property2': '<replace-value>:2222'
+            }
+          }
+        }
+      ]);
+      var configs = {
+        'test-site': {}
+      };
+
+      expect(controller.setAdditionalConfigs(configs, 'COMP1', 'host1')).to.be.true;
+      expect(configs).to.eql({
+        'test-site': {
+          'property2': 'host1:2222'
+        }
+      });
+    });
+
+    it('ignore some configs for NameNode after HA', function () {
+      controller.set('additionalConfigsMap', [
+        {
+          componentName: 'NAMENODE',
+          configs: {
+            'test-site': {
+              'fs.defaultFS': '<replace-value>:1111',
+              'dfs.namenode.rpc-address': '<replace-value>:1111'
+            }
+          }
+        }
+      ]);
+      var configs = {'test-site': {}};
+
+      expect(controller.setAdditionalConfigs(configs, 'NAMENODE', 'host1')).to.be.true;
+      expect(configs).to.eql({'test-site': {}});
+    });
+  });
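
These cases pin down the template convention behind additionalConfigsMap: each value is a pattern such as '<replace-value>:2222', and setAdditionalConfigs substitutes the target host for the placeholder (so '<replace-value>:2222' with target 'host1' yields 'host1:2222'); a configs_Hadoop2 block takes precedence when present, and for NAMENODE under HA the fs.defaultFS and dfs.namenode.rpc-address entries are deliberately left untouched.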
+
+  describe('#getConfigUrlParams()', function () {
+    var testCases = [
+      {
+        componentName: 'NAMENODE',
+        result: [
+          "(type=hdfs-site&tag=1)",
+          "(type=core-site&tag=2)"
+        ]
+      },
+      {
+        componentName: 'SECONDARY_NAMENODE',
+        result: [
+          "(type=hdfs-site&tag=1)",
+          "(type=core-site&tag=2)"
+        ]
+      },
+      {
+        componentName: 'JOBTRACKER',
+        result: [
+          "(type=mapred-site&tag=4)"
+        ]
+      },
+      {
+        componentName: 'RESOURCEMANAGER',
+        result: [
+          "(type=yarn-site&tag=5)"
+        ]
+      },
+      {
+        componentName: 'APP_TIMELINE_SERVER',
+        result: [
+          "(type=yarn-site&tag=5)",
+          "(type=yarn-env&tag=8)"
+        ]
+      },
+      {
+        componentName: 'OOZIE_SERVER',
+        result: [
+          "(type=oozie-site&tag=6)",
+          "(type=core-site&tag=2)",
+          "(type=oozie-env&tag=2)"
+        ]
+      },
+      {
+        componentName: 'WEBHCAT_SERVER',
+        result: [
+          "(type=hive-env&tag=11)",
+          "(type=webhcat-site&tag=7)",
+          "(type=core-site&tag=2)"
+        ]
+      },
+      {
+        componentName: 'HIVE_SERVER',
+        result: [
+          '(type=hive-site&tag=10)',
+          '(type=webhcat-site&tag=7)',
+          '(type=hive-env&tag=11)',
+          '(type=core-site&tag=2)'
+        ]
+      },
+      {
+        componentName: 'HIVE_METASTORE',
+        result: [
+          '(type=hive-site&tag=10)',
+          '(type=webhcat-site&tag=7)',
+          '(type=hive-env&tag=11)',
+          '(type=core-site&tag=2)'
+        ]
+      },
+      {
+        componentName: 'MYSQL_SERVER',
+        result: [
+          '(type=hive-site&tag=10)'
+        ]
+      },
+      {
+        componentName: 'HISTORYSERVER',
+        result: [
+          '(type=mapred-site&tag=4)'
+        ]
+      }
+    ];
+
+    var data = {
+      Clusters: {
+        desired_configs: {
+          'hdfs-site': {tag: 1},
+          'core-site': {tag: 2},
+          'hbase-site': {tag: 3},
+          'mapred-site': {tag: 4},
+          'yarn-site': {tag: 5},
+          'oozie-site': {tag: 6},
+          'oozie-env': {tag: 2},
+          'webhcat-site': {tag: 7},
+          'yarn-env': {tag: 8},
+          'accumulo-site': {tag: 9},
+          'hive-site': {tag: 10},
+          'hive-env': {tag: 11}
+        }
+      }
+    };
+
+    var services = [];
+
+    beforeEach(function () {
+      controller.set('wizardController', App.get('router.reassignMasterController'));
+      sinon.stub(App.Service, 'find', function () {
+        return services;
+      });
+    });
+    afterEach(function () {
+      App.Service.find.restore();
+    });
+
+    testCases.forEach(function (test) {
+      it('get config of ' + test.componentName, function () {
+        expect(controller.getConfigUrlParams(test.componentName, data)).to.eql(test.result);
+      });
+    });
+    it('get config of NAMENODE when HBASE installed', function () {
+      services = [
+        {
+          serviceName: 'HBASE'
+        }
+      ];
+      expect(controller.getConfigUrlParams('NAMENODE', data)).to.eql([
+        "(type=hdfs-site&tag=1)",
+        "(type=core-site&tag=2)",
+        "(type=hbase-site&tag=3)"
+      ]);
+    });
+
+    it('get config of NAMENODE when ACCUMULO installed', function () {
+      services = [
+        {
+          serviceName: 'ACCUMULO'
+        }
+      ];
+      expect(controller.getConfigUrlParams('NAMENODE', data)).to.eql([
+        "(type=hdfs-site&tag=1)",
+        "(type=core-site&tag=2)",
+        "(type=accumulo-site&tag=9)"
+      ]);
+    });
+
+  });
+
+  describe('#onLoadConfigsTags()', function () {
+    var dummyData = {
+      Clusters: {
+        desired_configs : {}
+      }
+    };
+
+    beforeEach(function () {
+      sinon.stub(controller, 'getConfigUrlParams', function () {
+        return [];
+      });
+      controller.set('content', {
+        reassign: {
+          component_name: 'COMP1'
+        }
+      });
+      controller.onLoadConfigsTags(dummyData);
+      this.args = testHelpers.findAjaxRequest('name', 'reassign.load_configs');
+    });
+
+    afterEach(function () {
+      controller.getConfigUrlParams.restore();
+    });
+
+    it('request is sent', function () {
+      expect(this.args).exists;
+    });
+
+    it('getConfigUrlParams is called with correct data', function () {
+      expect(controller.getConfigUrlParams.calledWith('COMP1', dummyData)).to.be.true;
+    });
+  });
+
+  describe('#setSecureConfigs()', function () {
+
+    beforeEach(function () {
+      this.stub = sinon.stub(App, 'get');
+    });
+
+    afterEach(function () {
+      Em.tryInvoke(App.get, 'restore');
+    });
+
+    it('undefined component and security disabled', function () {
+      var secureConfigs = [];
+      this.stub.withArgs('isKerberosEnabled').returns(false);
+      controller.set('secureConfigsMap', []);
+      expect(controller.setSecureConfigs(secureConfigs, {}, 'COMP1')).to.be.false;
+      expect(secureConfigs).to.eql([]);
+    });
+
+    it('component exists and security disabled', function () {
+      var secureConfigs = [];
+      this.stub.withArgs('isKerberosEnabled').returns(false);
+      controller.set('secureConfigsMap', [{
+        componentName: 'COMP1'
+      }]);
+      expect(controller.setSecureConfigs(secureConfigs, {}, 'COMP1')).to.be.false;
+      expect(secureConfigs).to.eql([]);
+    });
+
+    it('undefined component and security enabled', function () {
+      var secureConfigs = [];
+      this.stub.withArgs('isKerberosEnabled').returns(true);
+      controller.set('secureConfigsMap', []);
+      expect(controller.setSecureConfigs(secureConfigs, {}, 'COMP1')).to.be.false;
+      expect(secureConfigs).to.eql([]);
+    });
+    it('component exists and security enabled', function () {
+      var secureConfigs = [];
+      this.stub.withArgs('isKerberosEnabled').returns(true);
+      var configs = {'s1': {
+        'k1': 'kValue',
+        'p1': 'pValue'
+      }};
+      controller.set('secureConfigsMap', [{
+        componentName: 'COMP1',
+        configs: [{
+          site: 's1',
+          keytab: 'k1',
+          principal: 'p1'
+        }]
+      }]);
+      expect(controller.setSecureConfigs(secureConfigs, configs, 'COMP1')).to.be.true;
+      expect(secureConfigs).to.eql([
+        {
+          "keytab": "kValue",
+          "principal": "pValue"
+        }
+      ]);
+    });
+  });
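
The shape exercised above: each secureConfigsMap entry names a component and, per config, the site plus the keys holding its keytab and principal; setSecureConfigs resolves those keys against the loaded configs and pushes plain {keytab, principal} pairs, returning true only when Kerberos is enabled and the component has an entry.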
+
+  describe('#setDynamicConfigs()', function () {
+
+    describe('HIVE', function() {
+      beforeEach(function () {
+        controller.set('content', Em.Object.create({
+          masterComponentHosts: [
+            {
+              component: 'HIVE_METASTORE',
+              hostName: 'host1'
+            },
+            {
+              component: 'HIVE_METASTORE',
+              hostName: 'host3'
+            },
+            {
+              component: 'HIVE_SERVER',
+              hostName: 'host4'
+            }
+          ],
+          reassignHosts: {
+            source: 'host1',
+            target: 'host2'
+          }
+        }));
+      });
+      it("reassign component is HIVE_METASTORE", function() {
+        var configs = {
+          'hive-env': {
+            'hive_user': 'hive_user'
+          },
+          'hive-site': {
+            'hive.metastore.uris': ''
+          },
+          'webhcat-site': {
+            'templeton.hive.properties': 'thrift'
+          },
+          'core-site': {
+            'hadoop.proxyuser.hive_user.hosts': ''
+          }
+        };
+        App.MoveHmConfigInitializer.setup(controller._getHiveInitializerSettings(configs));
+        configs = controller.setDynamicConfigs(configs, App.MoveHmConfigInitializer);
+        expect(configs['hive-site']['hive.metastore.uris']).to.equal('thrift://host3:9083,thrift://host2:9083');
+        expect(configs['webhcat-site']['templeton.hive.properties']).to.equal('thrift');
+        expect(configs['core-site']['hadoop.proxyuser.hive_user.hosts']).to.equal('host2,host3,host4');
+      });
+
+      it("reassign component is HIVE_SERVER", function() {
+        controller.get('content.masterComponentHosts').pushObject({component: 'HIVE_SERVER', hostName: 'host1'});
+        var configs = {
+          'hive-env': {
+            'hive_user': 'hive_user'
+          },
+          'hive-site': {
+            'hive.metastore.uris': ''
+          },
+          'webhcat-site': {
+            'templeton.hive.properties': 'thrift'
+          },
+          'core-site': {
+            'hadoop.proxyuser.hive_user.hosts': ''
+          }
+        };
+        App.MoveHsConfigInitializer.setup(controller._getHiveInitializerSettings(configs));
+        configs = controller.setDynamicConfigs(configs, App.MoveHsConfigInitializer);
+        expect(configs['core-site']['hadoop.proxyuser.hive_user.hosts']).to.equal('host1,host2,host3,host4');
+      });
+
+      it("reassign component is WEBHCAT_SERVER", function() {
+        controller.get('content.masterComponentHosts').pushObject({component: 'WEBHCAT_SERVER', hostName: 'host1'});
+        var configs = {
+          'hive-env': {
+            'webhcat_user': 'webhcat_user'
+          },
+          'hive-site': {
+            'hive.metastore.uris': ''
+          },
+          'webhcat-site': {
+            'templeton.hive.properties': 'thrift'
+          },
+          'core-site': {
+            'hadoop.proxyuser.webhcat_user.hosts': ''
+          }
+        };
+        App.MoveWsConfigInitializer.setup(controller._getWsInitializerSettings(configs));
+        configs = controller.setDynamicConfigs(configs, App.MoveWsConfigInitializer);
+        expect(configs['core-site']['hadoop.proxyuser.webhcat_user.hosts']).to.equal('host2');
+      });
+    });
+
+    describe('RESOURCEMANAGER', function () {
+      beforeEach(function () {
+        sinon.stub(App, 'get').withArgs('isRMHaEnabled').returns(true);
+      });
+      afterEach(function () {
+        App.get.restore();
+        App.MoveRmConfigInitializer.cleanup();
+      });
+
+      it('HA enabled and resource manager 1', function () {
+        controller.set('content', Em.Object.create({
+          reassignHosts: {
+            source: 'host1',
+            target: 'host3'
+          }
+        }));
+        var configs = {
+          'yarn-site': {
+            'yarn.resourcemanager.hostname.rm1': 'host1',
+            'yarn.resourcemanager.webapp.address.rm1': 'host1:8088',
+            'yarn.resourcemanager.webapp.https.address.rm1': 'host1:8443',
+            'yarn.resourcemanager.hostname.rm2': 'host2',
+            'yarn.resourcemanager.webapp.address.rm2': 'host2:8088',
+            'yarn.resourcemanager.webapp.https.address.rm2': 'host2:8443'
+          }
+        };
+        var additionalDependencies = controller._getRmAdditionalDependencies(configs);
+        App.MoveRmConfigInitializer.setup(controller._getRmInitializerSettings(configs));
+        configs = controller.setDynamicConfigs(configs, App.MoveRmConfigInitializer, additionalDependencies);
+        expect(configs['yarn-site']).to.eql({
+          'yarn.resourcemanager.hostname.rm1': 'host3',
+          'yarn.resourcemanager.webapp.address.rm1': 'host3:8088',
+          'yarn.resourcemanager.webapp.https.address.rm1': 'host3:8443',
+          'yarn.resourcemanager.hostname.rm2': 'host2',
+          'yarn.resourcemanager.webapp.address.rm2': 'host2:8088',
+          'yarn.resourcemanager.webapp.https.address.rm2': 'host2:8443'
+        });
+      });
+
+      it('HA enabled and resource manager 2', function () {
+        controller.set('content', Em.Object.create({
+          reassignHosts: {
+            source: 'host2',
+            target: 'host3'
+          }
+        }));
+        var configs = {
+          'yarn-site': {
+            'yarn.resourcemanager.hostname.rm1': 'host1',
+            'yarn.resourcemanager.webapp.address.rm1': 'host1:8088',
+            'yarn.resourcemanager.webapp.https.address.rm1': 'host1:8443',
+            'yarn.resourcemanager.hostname.rm2': 'host2',
+            'yarn.resourcemanager.webapp.address.rm2': 'host2:8088',
+            'yarn.resourcemanager.webapp.https.address.rm2': 'host2:8443'
+          }
+        };
+        var additionalDependencies = controller._getRmAdditionalDependencies(configs);
+        App.MoveRmConfigInitializer.setup(controller._getRmInitializerSettings(configs));
+        configs = controller.setDynamicConfigs(configs, App.MoveRmConfigInitializer, additionalDependencies);
+
+        expect(configs['yarn-site']).to.eql({
+          'yarn.resourcemanager.hostname.rm1': 'host1',
+          'yarn.resourcemanager.webapp.address.rm1': 'host1:8088',
+          'yarn.resourcemanager.webapp.https.address.rm1': 'host1:8443',
+          'yarn.resourcemanager.hostname.rm2': 'host3',
+          'yarn.resourcemanager.webapp.address.rm2': 'host3:8088',
+          'yarn.resourcemanager.webapp.https.address.rm2': 'host3:8443'
+        });
+      });
+    });
+
+    describe('NAMENODE', function () {
+      var isHaEnabled = false;
+
+      beforeEach(function () {
+        sinon.stub(App, 'get', function () {
+          return isHaEnabled;
+        });
+        sinon.stub(App.Service, 'find', function () {
+          return [
+            {serviceName: 'HDFS'},
+            {serviceName: 'ACCUMULO'},
+            {serviceName: 'HBASE'},
+            {serviceName: 'HAWQ'}
+          ];
+        });
+        controller.set('content', Em.Object.create({
+          reassignHosts: {
+            source: 'host1'
+          }
+        }));
+      });
+
+      afterEach(function () {
+        App.get.restore();
+        App.Service.find.restore();
+        App.MoveNameNodeConfigInitializer.cleanup();
+      });
+
+      it('HA isn\'t enabled and HBASE, HAWQ and ACCUMULO service', function () {
+        isHaEnabled = false;
+        var configs = {
+          'hbase-site': {
+            'hbase.rootdir': 'hdfs://localhost:8020/apps/hbase/data'
+          },
+          'accumulo-site': {
+            'instance.volumes': 'hdfs://localhost:8020/apps/accumulo/data',
+            'instance.volumes.replacements': ''
+          },
+          'hawq-site': {
+            'hawq_dfs_url': 'localhost:8020/hawq/data'
+          }
+        };
+
+        controller.set('content.reassignHosts.target', 'host2');
+
+        App.MoveNameNodeConfigInitializer.setup(controller._getNnInitializerSettings(configs));
+        configs = controller.setDynamicConfigs(configs, App.MoveNameNodeConfigInitializer);
+
+        expect(configs['hbase-site']['hbase.rootdir']).to.equal('hdfs://host2:8020/apps/hbase/data');
+        expect(configs['accumulo-site']['instance.volumes']).to.equal('hdfs://host2:8020/apps/accumulo/data');
+        expect(configs['accumulo-site']['instance.volumes.replacements']).to.equal('hdfs://host1:8020/apps/accumulo/data hdfs://host2:8020/apps/accumulo/data');
+        expect(configs['hawq-site'].hawq_dfs_url).to.equal('host2:8020/hawq/data');
+      });
+
+      it('HA enabled and namenode 1', function () {
+        isHaEnabled = true;
+        var configs = {
+          'hdfs-site': {
+            'dfs.nameservices': 's',
+            'dfs.namenode.http-address.s.nn1': 'host1:50070',
+            'dfs.namenode.https-address.s.nn1': 'host1:50470',
+            'dfs.namenode.rpc-address.s.nn1': 'host1:8020'
+          },
+          'hdfs-client': {
+            'dfs.namenode.rpc-address.s.nn1': '',
+            'dfs.namenode.http-address.s.nn1': 'host1:50070'
+          }
+        };
+
+        controller.set('content.reassignHosts.target', 'host2');
+        App.MoveNameNodeConfigInitializer.setup(controller._getNnInitializerSettings(configs));
+        configs = controller.setDynamicConfigs(configs, App.MoveNameNodeConfigInitializer);
+        expect(configs['hdfs-site']).to.eql({
+          "dfs.nameservices": "s",
+          "dfs.namenode.http-address.s.nn1": "host2:50070",
+          "dfs.namenode.https-address.s.nn1": "host2:50470",
+          "dfs.namenode.rpc-address.s.nn1": "host2:8020"
+        });
+        expect(configs['hdfs-client']).to.eql({
+          "dfs.namenode.http-address.s.nn1": "host2:50070",
+          "dfs.namenode.rpc-address.s.nn1": "host2:8020"
+        });
+      });
+
+      it('HA enabled and namenode 2', function () {
+        isHaEnabled = true;
+        var configs = {
+          'hdfs-site': {
+            'dfs.nameservices': 's',
+            "dfs.namenode.http-address.s.nn1": "host1:50070",
+            'dfs.namenode.http-address.s.nn2': 'host2:50070',
+            'dfs.namenode.https-address.s.nn2': 'host2:50470',
+            'dfs.namenode.rpc-address.s.nn2': 'host2:8020'
+          },
+          'hdfs-client': {
+            'dfs.namenode.rpc-address.s.nn2': '',
+            'dfs.namenode.http-address.s.nn2': 'host2:50070'
+          }
+        };
+        controller.set('content.reassignHosts.source', 'host2');
+        controller.set('content.reassignHosts.target', 'host3');
+
+        App.MoveNameNodeConfigInitializer.setup(controller._getNnInitializerSettings(configs));
+        configs = controller.setDynamicConfigs(configs, App.MoveNameNodeConfigInitializer);
+
+        expect(configs['hdfs-site']).to.eql({
+          "dfs.nameservices": "s",
+          "dfs.namenode.http-address.s.nn1": "host1:50070",
+          "dfs.namenode.http-address.s.nn2": "host3:50070",
+          "dfs.namenode.https-address.s.nn2": "host3:50470",
+          "dfs.namenode.rpc-address.s.nn2": "host3:8020"
+        });
+        expect(configs['hdfs-client']).to.eql({
+          "dfs.namenode.http-address.s.nn2": "host3:50070",
+          "dfs.namenode.rpc-address.s.nn2": "host3:8020"
+        });
+      });
+
+    });
+
+    describe('OOZIE_SERVER', function () {
+
+      it('should update hadoop.proxyuser.${oozie_user}.hosts', function () {
+
+        var configs = {
+          'oozie-env': {
+            'oozie_user': 'cool_dude'
+          },
+          'core-site': {
+            'hadoop.proxyuser.cool_dude.hosts': ''
+          }
+        };
+
+        controller.set('content', Em.Object.create({
+          masterComponentHosts: [
+            {
+              component: 'OOZIE_SERVER',
+              hostName: 'host2'
+            },
+            {
+              component: 'OOZIE_SERVER',
+              hostName: 'host3'
+            },
+            {
+              component: 'OOZIE_SERVER',
+              hostName: 'host1'
+            }
+          ],
+          reassignHosts: {
+            source: 'host1',
+            target: 'host4'
+          }
+        }));
+
+        App.MoveOSConfigInitializer.setup(controller._getOsInitializerSettings(configs));
+        configs = controller.setDynamicConfigs(configs, App.MoveOSConfigInitializer);
+        App.MoveOSConfigInitializer.cleanup();
+
+        expect(configs['core-site']['hadoop.proxyuser.cool_dude.hosts']).to.equal('host2,host3,host4');
+
+      });
+
+    });
+
+  });
 });
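
The moved-component initializers these new tests exercise all follow the same host-substitution pattern. A minimal sketch of that pattern (plain JavaScript, illustrative only -- the real App.Move*ConfigInitializer classes encapsulate this behind Ambari's initializer framework, and the helpers below are hypothetical stand-ins):

    // hadoop.proxyuser.<user>.hosts keeps every host that runs the
    // component, with the reassign source swapped for the target.
    function recomputeProxyUserHosts(componentHosts, sourceHost, targetHost) {
      return componentHosts
        .map(function (host) { return host === sourceHost ? targetHost : host; })
        .sort()
        .join(',');
    }
    // recomputeProxyUserHosts(['host2', 'host3', 'host1'], 'host1', 'host4')
    //   => 'host2,host3,host4'  -- the OOZIE_SERVER expectation above.

    // yarn-site/hdfs-site style values rewrite only entries whose host
    // part equals the source host, keeping any port suffix intact.
    function moveHostInConfigs(siteConfigs, sourceHost, targetHost) {
      var moved = {};
      Object.keys(siteConfigs).forEach(function (key) {
        moved[key] = String(siteConfigs[key]).replace(
          new RegExp('^' + sourceHost + '(?=:|$)'), targetHost);
      });
      return moved;
    }
    // 'host1:8088' => 'host3:8088' while 'host2:8088' is untouched,
    // matching the rm1 expectations in the RESOURCEMANAGER tests above.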

http://git-wip-us.apache.org/repos/asf/ambari/blob/cba69d93/ambari-web/test/controllers/main/service/reassign/step4_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/service/reassign/step4_controller_test.js b/ambari-web/test/controllers/main/service/reassign/step4_controller_test.js
index 9a73524..aac15b8 100644
--- a/ambari-web/test/controllers/main/service/reassign/step4_controller_test.js
+++ b/ambari-web/test/controllers/main/service/reassign/step4_controller_test.js
@@ -19,6 +19,7 @@
 var App = require('app');
 
 require('controllers/main/service/reassign/step4_controller');
+require('controllers/main/service/reassign_controller');
 var testHelpers = require('test/helpers');
 
 describe('App.ReassignMasterWizardStep4Controller', function () {
@@ -30,71 +31,6 @@ describe('App.ReassignMasterWizardStep4Controller', function () {
     })
   });
 
-  describe('#setAdditionalConfigs()', function () {
-
-    beforeEach(function () {
-      sinon.stub(App, 'get').withArgs('isHaEnabled').returns(true);
-    });
-
-    afterEach(function () {
-      App.get.restore();
-    });
-
-    it('Component is absent', function () {
-      controller.set('additionalConfigsMap', []);
-      var configs = {};
-
-      expect(controller.setAdditionalConfigs(configs, 'COMP1', '')).to.be.false;
-      expect(configs).to.eql({});
-    });
-
-    it('configs for Hadoop 2 is present', function () {
-      controller.set('additionalConfigsMap', [
-        {
-          componentName: 'COMP1',
-          configs: {
-            'test-site': {
-              'property1': '<replace-value>:1111'
-            }
-          },
-          configs_Hadoop2: {
-            'test-site': {
-              'property2': '<replace-value>:2222'
-            }
-          }
-        }
-      ]);
-      var configs = {
-        'test-site': {}
-      };
-
-      expect(controller.setAdditionalConfigs(configs, 'COMP1', 'host1')).to.be.true;
-      expect(configs).to.eql({
-        'test-site': {
-          'property2': 'host1:2222'
-        }
-      });
-    });
-
-    it('ignore some configs for NameNode after HA', function () {
-      controller.set('additionalConfigsMap', [
-        {
-          componentName: 'NAMENODE',
-          configs: {
-            'test-site': {
-              'fs.defaultFS': '<replace-value>:1111',
-              'dfs.namenode.rpc-address': '<replace-value>:1111'
-            }
-          }
-        }
-      ]);
-      var configs = {'test-site': {}};
-
-      expect(controller.setAdditionalConfigs(configs, 'NAMENODE', 'host1')).to.be.true;
-      expect(configs).to.eql({'test-site': {}});
-    });
-  });
-
   describe('#getHostComponentsNames()', function () {
     it('No host-components', function () {
       controller.set('hostComponents', []);
@@ -113,7 +49,11 @@ describe('App.ReassignMasterWizardStep4Controller', function () {
   describe('#testDBConnection', function() {
     beforeEach(function() {
       controller.set('requiredProperties', Em.A([]));
-      controller.set('content.serviceProperties', Em.Object.create({'javax.jdo.option.ConnectionDriverName': 'mysql'}));
+      controller.set('content.configs', Em.Object.create({
+        'hive-site': {
+          'javax.jdo.option.ConnectionDriverName': 'mysql'
+        }
+      }));
       controller.set('content.reassign.component_name', 'HIVE_SERVER');
       sinon.stub(controller, 'getConnectionProperty', Em.K);
       sinon.stub(App.router, 'get', Em.K);
@@ -200,7 +140,7 @@ describe('App.ReassignMasterWizardStep4Controller', function () {
       controller.set('content.hasManualSteps', false);
 
       controller.removeUnneededTasks();
-      expect(controller.get('tasks').mapProperty('id')).to.eql([1,3,4,5,6,9,12,13]);
+      expect(controller.get('tasks').mapProperty('id')).to.eql([1, 3, 4, 6, 9, 12, 13]);
     });
 
     it('reassign component is not NameNode and HA disabled', function () {
@@ -210,7 +150,7 @@ describe('App.ReassignMasterWizardStep4Controller', function () {
       isHaEnabled = false;
 
       controller.removeUnneededTasks();
-      expect(controller.get('tasks').mapProperty('id')).to.eql([1, 3, 4, 5, 6]);
+      expect(controller.get('tasks').mapProperty('id')).to.eql([1, 3, 4, 6]);
     });
 
     it('reassign component is not NameNode and HA enabled', function () {
@@ -220,7 +160,7 @@ describe('App.ReassignMasterWizardStep4Controller', function () {
       isHaEnabled = true;
 
       controller.removeUnneededTasks();
-      expect(controller.get('tasks').mapProperty('id')).to.eql([1, 3, 4, 5, 6]);
+      expect(controller.get('tasks').mapProperty('id')).to.eql([1, 3, 4, 6]);
     });
 
     it('reassign component is NameNode and HA disabled', function () {
@@ -230,7 +170,7 @@ describe('App.ReassignMasterWizardStep4Controller', function () {
       isHaEnabled = false;
 
       controller.removeUnneededTasks();
-      expect(controller.get('tasks').mapProperty('id')).to.eql([1, 3, 4, 5, 6]);
+      expect(controller.get('tasks').mapProperty('id')).to.eql([1, 3, 4, 6]);
     });
 
     it('reassign component is NameNode and HA enabled', function () {
@@ -240,7 +180,19 @@ describe('App.ReassignMasterWizardStep4Controller', function () {
       isHaEnabled = true;
 
       controller.removeUnneededTasks();
-      expect(controller.get('tasks').mapProperty('id')).to.eql([1, 3, 4, 5, 6, 7, 8]);
+      expect(controller.get('tasks').mapProperty('id')).to.eql([1, 3, 4, 6, 7, 8]);
+    });
+
+    it('component with reconfiguration', function () {
+      controller.set('tasks', commands);
+      controller.set('content.hasManualSteps', false);
+      controller.set('content.reassign.component_name', 'COMP1');
+      controller.set('wizardController', {
+        isComponentWithReconfiguration: true
+      });
+
+      controller.removeUnneededTasks();
+      expect(controller.get('tasks').mapProperty('id')).to.eql([1, 3, 4, 5, 6, 9, 13]);
     });
 
     it('reassign component is HiveServer and db type is mysql', function () {
@@ -287,17 +239,6 @@ describe('App.ReassignMasterWizardStep4Controller', function () {
       expect(controller.get('tasks').mapProperty('id')).to.eql([1,2,3,4,5,6,7,8,9,10,11,12]);
     });
 
-    it('reassign component is Metrics Collector', function () {
-      controller.set('content.hasManualSteps', false);
-      controller.set('content.databaseType', 'mysql');
-      controller.set('content.reassign.component_name', 'METRICS_COLLECTOR');
-      isHaEnabled = false;
-
-      controller.set('tasks', commandsForDB);
-      controller.removeUnneededTasks();
-      expect(controller.get('tasks').mapProperty('id')).to.eql([1,2,5,6,8,10,12]);
-    });
-
     it('reassign component is Mysql Server', function () {
       controller.set('content.hasManualSteps', false);
       controller.set('content.databaseType', 'mysql');
@@ -312,6 +253,7 @@ describe('App.ReassignMasterWizardStep4Controller', function () {
 
   describe("#stopRequiredServices()", function() {
     before(function () {
+      controller.set('wizardController', App.get('router.reassignMasterController'));
       sinon.stub(controller, 'stopServices', Em.K);
     });
     after(function () {
@@ -510,199 +452,23 @@ describe('App.ReassignMasterWizardStep4Controller', function () {
   describe('#reconfigure()', function () {
 
     beforeEach(function () {
-      sinon.stub(controller, 'loadConfigsTags', Em.K);
+      sinon.stub(controller, 'saveClusterStatus', Em.K);
+      sinon.stub(controller, 'saveConfigsToServer', Em.K);
     });
 
     afterEach(function () {
-      controller.loadConfigsTags.restore();
+      controller.saveClusterStatus.restore();
+      controller.saveConfigsToServer.restore();
     });
 
-    it('loadConfigsTags is called once', function () {
+    it('saveClusterStatus is called once', function () {
       controller.reconfigure();
-      expect(controller.loadConfigsTags.calledOnce).to.be.true;
-    });
-  });
-
-  describe('#loadConfigsTags()', function () {
-    it('request is sent', function () {
-      controller.loadConfigsTags();
-      var args = testHelpers.findAjaxRequest('name', 'config.tags');
-      expect(args).exists;
-    });
-  });
-
-  describe('#getConfigUrlParams()', function () {
-    var testCases = [
-      {
-        componentName: 'NAMENODE',
-        result: [
-          "(type=hdfs-site&tag=1)",
-          "(type=core-site&tag=2)"
-        ]
-      },
-      {
-        componentName: 'SECONDARY_NAMENODE',
-        result: [
-          "(type=hdfs-site&tag=1)",
-          "(type=core-site&tag=2)"
-        ]
-      },
-      {
-        componentName: 'JOBTRACKER',
-        result: [
-          "(type=mapred-site&tag=4)"
-        ]
-      },
-      {
-        componentName: 'RESOURCEMANAGER',
-        result: [
-          "(type=yarn-site&tag=5)"
-        ]
-      },
-      {
-        componentName: 'APP_TIMELINE_SERVER',
-        result: [
-          "(type=yarn-site&tag=5)",
-          "(type=yarn-env&tag=8)"
-        ]
-      },
-      {
-        componentName: 'OOZIE_SERVER',
-        result: [
-          "(type=oozie-site&tag=6)",
-          "(type=core-site&tag=2)",
-          "(type=oozie-env&tag=2)"
-        ]
-      },
-      {
-        componentName: 'WEBHCAT_SERVER',
-        result: [
-          "(type=hive-env&tag=11)",
-          "(type=webhcat-site&tag=7)",
-          "(type=core-site&tag=2)"
-        ]
-      },
-      {
-        componentName: 'HIVE_SERVER',
-        result: [
-          '(type=hive-site&tag=10)',
-          '(type=webhcat-site&tag=7)',
-          '(type=hive-env&tag=11)',
-          '(type=core-site&tag=2)'
-        ]
-      },
-      {
-        componentName: 'HIVE_METASTORE',
-        result: [
-          '(type=hive-site&tag=10)',
-          '(type=webhcat-site&tag=7)',
-          '(type=hive-env&tag=11)',
-          '(type=core-site&tag=2)'
-        ]
-      },
-      {
-        componentName: 'MYSQL_SERVER',
-        result: [
-          '(type=hive-site&tag=10)'
-        ]
-      },
-      {
-        componentName: 'HISTORYSERVER',
-        result: [
-          '(type=mapred-site&tag=4)'
-        ]
-      }
-    ];
-
-    var data = {
-      Clusters: {
-        desired_configs: {
-          'hdfs-site': {tag: 1},
-          'core-site': {tag: 2},
-          'hbase-site': {tag: 3},
-          'mapred-site': {tag: 4},
-          'yarn-site': {tag: 5},
-          'oozie-site': {tag: 6},
-          'oozie-env': {tag: 2},
-          'webhcat-site': {tag: 7},
-          'yarn-env': {tag: 8},
-          'accumulo-site': {tag: 9},
-          'hive-site': {tag: 10},
-          'hive-env': {tag: 11}
-        }
-      }
-    };
-
-    var services = [];
-
-    beforeEach(function () {
-      sinon.stub(App.Service, 'find', function () {
-        return services;
-      });
-    });
-    afterEach(function () {
-      App.Service.find.restore();
-    });
-
-    testCases.forEach(function (test) {
-      it('get config of ' + test.componentName, function () {
-        expect(controller.getConfigUrlParams(test.componentName, data)).to.eql(test.result);
-      });
-    });
-    it('get config of NAMENODE when HBASE installed', function () {
-      services = [
-        {
-          serviceName: 'HBASE'
-        }
-      ];
-      expect(controller.getConfigUrlParams('NAMENODE', data)).to.eql([
-        "(type=hdfs-site&tag=1)",
-        "(type=core-site&tag=2)",
-        "(type=hbase-site&tag=3)"
-      ]);
-    });
-
-    it('get config of NAMENODE when ACCUMULO installed', function () {
-      services = [
-        {
-          serviceName: 'ACCUMULO'
-        }
-      ];
-      expect(controller.getConfigUrlParams('NAMENODE', data)).to.eql([
-        "(type=hdfs-site&tag=1)",
-        "(type=core-site&tag=2)",
-        "(type=accumulo-site&tag=9)"
-      ]);
+      expect(controller.saveClusterStatus.calledOnce).to.be.true;
     });
 
-  });
-
-  describe('#onLoadConfigsTags()', function () {
-    var dummyData = {
-      Clusters: {
-        desired_configs : {}
-      }
-    };
-
-    beforeEach(function () {
-      sinon.stub(controller, 'getConfigUrlParams', function () {
-        return [];
-      });
-      controller.set('content.reassign.component_name', 'COMP1');
-      controller.onLoadConfigsTags(dummyData);
-      this.args = testHelpers.findAjaxRequest('name', 'reassign.load_configs');
-    });
-
-    afterEach(function () {
-      controller.getConfigUrlParams.restore();
-    });
-
-    it('request is sent', function () {
-      expect(this.args).exists;
-    });
-
-    it('getConfigUrlParams is called with correct data', function () {
-      expect(controller.getConfigUrlParams.calledWith('COMP1', dummyData)).to.be.true;
+    it('saveConfigsToServer is called once', function () {
+      controller.reconfigure();
+      expect(controller.saveConfigsToServer.calledOnce).to.be.true;
     });
   });
 
@@ -774,66 +540,6 @@ describe('App.ReassignMasterWizardStep4Controller', function () {
     });
   });
 
-  describe('#setSecureConfigs()', function () {
-
-    beforeEach(function () {
-      this.stub = sinon.stub(App, 'get');
-    });
-
-    afterEach(function () {
-      Em.tryInvoke(App.get, 'restore');
-    });
-
-    it('undefined component and security disabled', function () {
-      var secureConfigs = [];
-      this.stub.withArgs('isKerberosEnabled').returns(false);
-      controller.set('secureConfigsMap', []);
-      expect(controller.setSecureConfigs(secureConfigs, {}, 'COMP1')).to.be.false;
-      expect(secureConfigs).to.eql([]);
-    });
-
-    it('component exist and security disabled', function () {
-      var secureConfigs = [];
-      this.stub.withArgs('isKerberosEnabled').returns(false);
-      controller.set('secureConfigsMap', [{
-        componentName: 'COMP1'
-      }]);
-      expect(controller.setSecureConfigs(secureConfigs, {}, 'COMP1')).to.be.false;
-      expect(secureConfigs).to.eql([]);
-    });
-
-    it('undefined component and security enabled', function () {
-      var secureConfigs = [];
-      this.stub.withArgs('isKerberosEnabled').returns(true);
-      controller.set('secureConfigsMap', []);
-      expect(controller.setSecureConfigs(secureConfigs, {}, 'COMP1')).to.be.false;
-      expect(secureConfigs).to.eql([]);
-    });
-    it('component exist and security enabled', function () {
-      var secureConfigs = [];
-      this.stub.withArgs('isKerberosEnabled').returns(true);
-      var configs = {'s1': {
-        'k1': 'kValue',
-        'p1': 'pValue'
-      }};
-      controller.set('secureConfigsMap', [{
-        componentName: 'COMP1',
-        configs: [{
-          site: 's1',
-          keytab: 'k1',
-          principal: 'p1'
-        }]
-      }]);
-      expect(controller.setSecureConfigs(secureConfigs, configs, 'COMP1')).to.be.true;
-      expect(secureConfigs).to.eql([
-        {
-          "keytab": "kValue",
-          "principal": "pValue"
-        }
-      ]);
-    });
-  });
-
   describe('#getComponentDir()', function () {
     var configs = {
       'hdfs-site': {
@@ -1318,292 +1024,6 @@ describe('App.ReassignMasterWizardStep4Controller', function () {
     });
   });
 
-  describe("#setDynamicConfigs HIVE", function() {
-    beforeEach(function () {
-      controller.set('content.masterComponentHosts', [
-        {component: 'HIVE_METASTORE', hostName: 'host1'},
-        {component: 'HIVE_METASTORE', hostName: 'host3'},
-        {component: 'HIVE_SERVER', hostName: 'host4'}
-      ]);
-      controller.set('content.reassignHosts.source', 'host1');
-      controller.set('content.reassignHosts.target', 'host2');
-    });
-    it("reassign component is HIVE_METASTORE", function() {
-      var configs = {
-        'hive-env': {
-          'hive_user': 'hive_user'
-        },
-        'hive-site': {
-          'hive.metastore.uris': ''
-        },
-        'webhcat-site': {
-          'templeton.hive.properties': 'thrift'
-        },
-        'core-site': {
-          'hadoop.proxyuser.hive_user.hosts': ''
-        }
-      };
-      App.MoveHmConfigInitializer.setup(controller._getHiveInitializerSettings(configs));
-      configs = controller.setDynamicConfigs(configs, App.MoveHmConfigInitializer);
-      expect(configs['hive-site']['hive.metastore.uris']).to.equal('thrift://host3:9083,thrift://host2:9083');
-      expect(configs['webhcat-site']['templeton.hive.properties']).to.equal('thrift');
-      expect(configs['core-site']['hadoop.proxyuser.hive_user.hosts']).to.equal('host2,host3,host4');
-    });
-
-    it("reassign component is HIVE_SERVER", function() {
-      controller.get('content.masterComponentHosts').pushObject({component: 'HIVE_SERVER', hostName: 'host1'});
-      var configs = {
-        'hive-env': {
-          'hive_user': 'hive_user'
-        },
-        'hive-site': {
-          'hive.metastore.uris': ''
-        },
-        'webhcat-site': {
-          'templeton.hive.properties': 'thrift'
-        },
-        'core-site': {
-          'hadoop.proxyuser.hive_user.hosts': ''
-        }
-      };
-      App.MoveHsConfigInitializer.setup(controller._getHiveInitializerSettings(configs));
-      configs = controller.setDynamicConfigs(configs, App.MoveHsConfigInitializer);
-      expect(configs['core-site']['hadoop.proxyuser.hive_user.hosts']).to.equal('host1,host2,host3,host4');
-    });
-
-    it("reassign component is WEBHCAT_SERVER", function() {
-      controller.get('content.masterComponentHosts').pushObject({component: 'WEBHCAT_SERVER', hostName: 'host1'});
-      var configs = {
-        'hive-env': {
-          'webhcat_user': 'webhcat_user'
-        },
-        'hive-site': {
-          'hive.metastore.uris': ''
-        },
-        'webhcat-site': {
-          'templeton.hive.properties': 'thrift'
-        },
-        'core-site': {
-          'hadoop.proxyuser.webhcat_user.hosts': ''
-        }
-      };
-      App.MoveWsConfigInitializer.setup(controller._getWsInitializerSettings(configs));
-      configs = controller.setDynamicConfigs(configs, App.MoveWsConfigInitializer);
-      expect(configs['core-site']['hadoop.proxyuser.webhcat_user.hosts']).to.equal('host2');
-    });
-  });
-
-  describe('#setDynamicConfigs RESOURCEMANAGER', function () {
-    beforeEach(function () {
-      sinon.stub(App, 'get').withArgs('isRMHaEnabled').returns(true);
-    });
-    afterEach(function () {
-      App.get.restore();
-      App.MoveRmConfigInitializer.cleanup();
-    });
-
-    it('HA enabled and resource manager 1', function () {
-      controller.set('content.reassignHosts.source', 'host1');
-      controller.set('content.reassignHosts.target', 'host3');
-      var configs = {
-        'yarn-site': {
-          'yarn.resourcemanager.hostname.rm1': 'host1',
-          'yarn.resourcemanager.webapp.address.rm1': 'host1:8088',
-          'yarn.resourcemanager.webapp.https.address.rm1': 'host1:8443',
-          'yarn.resourcemanager.hostname.rm2': 'host2',
-          'yarn.resourcemanager.webapp.address.rm2': 'host2:8088',
-          'yarn.resourcemanager.webapp.https.address.rm2': 'host2:8443'
-        }
-      };
-      var additionalDependencies = controller._getRmAdditionalDependencies(configs);
-      App.MoveRmConfigInitializer.setup(controller._getRmInitializerSettings(configs));
-      configs = controller.setDynamicConfigs(configs, App.MoveRmConfigInitializer, additionalDependencies);
-      expect(configs['yarn-site']).to.eql({
-        'yarn.resourcemanager.hostname.rm1': 'host3',
-        'yarn.resourcemanager.webapp.address.rm1': 'host3:8088',
-        'yarn.resourcemanager.webapp.https.address.rm1': 'host3:8443',
-        'yarn.resourcemanager.hostname.rm2': 'host2',
-        'yarn.resourcemanager.webapp.address.rm2': 'host2:8088',
-        'yarn.resourcemanager.webapp.https.address.rm2': 'host2:8443'
-      });
-    });
-
-    it('HA enabled and resource manager 2', function () {
-      controller.set('content.reassignHosts.source', 'host2');
-      controller.set('content.reassignHosts.target', 'host3');
-      var configs = {
-        'yarn-site': {
-          'yarn.resourcemanager.hostname.rm1': 'host1',
-          'yarn.resourcemanager.webapp.address.rm1': 'host1:8088',
-          'yarn.resourcemanager.webapp.https.address.rm1': 'host1:8443',
-          'yarn.resourcemanager.hostname.rm2': 'host2',
-          'yarn.resourcemanager.webapp.address.rm2': 'host2:8088',
-          'yarn.resourcemanager.webapp.https.address.rm2': 'host2:8443'
-        }
-      };
-      var additionalDependencies = controller._getRmAdditionalDependencies(configs);
-      App.MoveRmConfigInitializer.setup(controller._getRmInitializerSettings(configs));
-      configs = controller.setDynamicConfigs(configs, App.MoveRmConfigInitializer, additionalDependencies);
-
-      expect(configs['yarn-site']).to.eql({
-        'yarn.resourcemanager.hostname.rm1': 'host1',
-        'yarn.resourcemanager.webapp.address.rm1': 'host1:8088',
-        'yarn.resourcemanager.webapp.https.address.rm1': 'host1:8443',
-        'yarn.resourcemanager.hostname.rm2': 'host3',
-        'yarn.resourcemanager.webapp.address.rm2': 'host3:8088',
-        'yarn.resourcemanager.webapp.https.address.rm2': 'host3:8443'
-      });
-    });
-  });
-
-  describe('#setDynamicConfigs NAMENODE', function () {
-    var isHaEnabled = false;
-
-    beforeEach(function () {
-      sinon.stub(App, 'get', function () {
-        return isHaEnabled;
-      });
-      sinon.stub(App.Service, 'find', function () {
-        return [
-          {serviceName: 'HDFS'},
-          {serviceName: 'ACCUMULO'},
-          {serviceName: 'HBASE'},
-          {serviceName: 'HAWQ'}
-        ];
-      });
-      controller.set('content.reassignHosts.source', 'host1');
-    });
-
-    afterEach(function () {
-      App.get.restore();
-      App.Service.find.restore();
-      App.MoveNameNodeConfigInitializer.cleanup();
-    });
-
-    it('HA isn\'t enabled and HBASE, HAWQ and ACCUMULO service', function () {
-      isHaEnabled = false;
-      var configs = {
-        'hbase-site': {
-          'hbase.rootdir': 'hdfs://localhost:8020/apps/hbase/data'
-        },
-        'accumulo-site': {
-          'instance.volumes': 'hdfs://localhost:8020/apps/accumulo/data',
-          'instance.volumes.replacements': ''
-        },
-        'hawq-site': {
-          'hawq_dfs_url': 'localhost:8020/hawq/data'
-        }
-      };
-
-      controller.set('content.reassignHosts.target', 'host2');
-
-      App.MoveNameNodeConfigInitializer.setup(controller._getNnInitializerSettings(configs));
-      configs = controller.setDynamicConfigs(configs, App.MoveNameNodeConfigInitializer);
-
-      expect(configs['hbase-site']['hbase.rootdir']).to.equal('hdfs://host2:8020/apps/hbase/data');
-      expect(configs['accumulo-site']['instance.volumes']).to.equal('hdfs://host2:8020/apps/accumulo/data');
-      expect(configs['accumulo-site']['instance.volumes.replacements']).to.equal('hdfs://host1:8020/apps/accumulo/data hdfs://host2:8020/apps/accumulo/data');
-      expect(configs['hawq-site'].hawq_dfs_url).to.equal('host2:8020/hawq/data');
-    });
-
-    it('HA enabled and namenode 1', function () {
-      isHaEnabled = true;
-      var configs = {
-        'hdfs-site': {
-          'dfs.nameservices': 's',
-          'dfs.namenode.http-address.s.nn1': 'host1:50070',
-          'dfs.namenode.https-address.s.nn1': 'host1:50470',
-          'dfs.namenode.rpc-address.s.nn1': 'host1:8020'
-        },
-        'hdfs-client': {
-          'dfs.namenode.rpc-address.s.nn1': '',
-          'dfs.namenode.http-address.s.nn1': 'host1:50070'
-        }
-      };
-
-      controller.set('content.reassignHosts.target', 'host2');
-      App.MoveNameNodeConfigInitializer.setup(controller._getNnInitializerSettings(configs));
-      configs = controller.setDynamicConfigs(configs, App.MoveNameNodeConfigInitializer);
-      expect(configs['hdfs-site']).to.eql({
-        "dfs.nameservices": "s",
-        "dfs.namenode.http-address.s.nn1": "host2:50070",
-        "dfs.namenode.https-address.s.nn1": "host2:50470",
-        "dfs.namenode.rpc-address.s.nn1": "host2:8020"
-      });
-      expect(configs['hdfs-client']).to.eql({
-        "dfs.namenode.http-address.s.nn1": "host2:50070",
-        "dfs.namenode.rpc-address.s.nn1": "host2:8020"
-      });
-    });
-
-    it('HA enabled and namenode 2', function () {
-      isHaEnabled = true;
-      var configs = {
-        'hdfs-site': {
-          'dfs.nameservices': 's',
-          "dfs.namenode.http-address.s.nn1": "host1:50070",
-          'dfs.namenode.http-address.s.nn2': 'host2:50070',
-          'dfs.namenode.https-address.s.nn2': 'host2:50470',
-          'dfs.namenode.rpc-address.s.nn2': 'host2:8020'
-        },
-        'hdfs-client': {
-          'dfs.namenode.rpc-address.s.nn2': '',
-          'dfs.namenode.http-address.s.nn2': 'host2:50070'
-        }
-      };
-      controller.set('content.reassignHosts.source', 'host2');
-      controller.set('content.reassignHosts.target', 'host3');
-
-      App.MoveNameNodeConfigInitializer.setup(controller._getNnInitializerSettings(configs));
-      configs = controller.setDynamicConfigs(configs, App.MoveNameNodeConfigInitializer);
-
-      expect(configs['hdfs-site']).to.eql({
-        "dfs.nameservices": "s",
-        "dfs.namenode.http-address.s.nn1": "host1:50070",
-        "dfs.namenode.http-address.s.nn2": "host3:50070",
-        "dfs.namenode.https-address.s.nn2": "host3:50470",
-        "dfs.namenode.rpc-address.s.nn2": "host3:8020"
-      });
-      expect(configs['hdfs-client']).to.eql({
-        "dfs.namenode.http-address.s.nn2": "host3:50070",
-        "dfs.namenode.rpc-address.s.nn2": "host3:8020"
-      });
-    });
-
-  });
-
-  describe('#setDynamicConfigs OOZIE_SERVER', function () {
-
-    it('should upodate hadoop.proxyuser.${oozie_user}.hosts', function () {
-
-      var configs = {
-        'oozie-env': {
-          'oozie_user': 'cool_dude'
-        },
-        'core-site': {
-          'hadoop.proxyuser.cool_dude.hosts': ''
-        }
-      };
-
-      controller.set('content.masterComponentHosts', [
-        {component: 'OOZIE_SERVER', hostName: 'host2'},
-        {component: 'OOZIE_SERVER', hostName: 'host3'},
-        {component: 'OOZIE_SERVER', hostName: 'host1'}
-      ]);
-
-      controller.set('content.reassignHosts.source', 'host1');
-      controller.set('content.reassignHosts.target', 'host4');
-
-      App.MoveOSConfigInitializer.setup(controller._getOsInitializerSettings(configs));
-      configs = controller.setDynamicConfigs(configs, App.MoveOSConfigInitializer);
-      App.MoveOSConfigInitializer.cleanup();
-
-      expect(configs['core-site']['hadoop.proxyuser.cool_dude.hosts']).to.equal('host2,host3,host4');
-
-    });
-
-  });
-
   describe.skip("#prepareDBCheckAction()", function() {
     beforeEach(function () {
       sinon.stub(App.router, 'get').returns({


[48/50] [abbrv] ambari git commit: AMBARI-19606. Search button is not functional in the dashboard (Padma Priya N via gauravn7)

Posted by nc...@apache.org.
AMBARI-19606. Search button is not functional in the dashboard (Padma Priya N via gauravn7)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/61d0f640
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/61d0f640
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/61d0f640

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 61d0f640b0d647b9c196f60dcd09b41adba05504
Parents: f7155d9
Author: Gaurav Nagar <gr...@gmail.com>
Authored: Wed Jan 18 17:04:50 2017 +0530
Committer: Gaurav Nagar <gr...@gmail.com>
Committed: Wed Jan 18 17:05:30 2017 +0530

----------------------------------------------------------------------
 .../src/main/resources/ui/app/components/search-create-new-bar.js | 3 +++
 .../ui/app/templates/components/search-create-new-bar.hbs         | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/61d0f640/contrib/views/wfmanager/src/main/resources/ui/app/components/search-create-new-bar.js
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/search-create-new-bar.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/search-create-new-bar.js
index f9226f4..e9bc44f 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/search-create-new-bar.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/search-create-new-bar.js
@@ -189,6 +189,9 @@ export default Ember.Component.extend(Ember.Evented,{
             elem.addClass("btn-primary");
             this.sendAction('onSearch', { type: type, filter: filter });
         },
+        onSearchClicked(){
+          this.$('#search-field').tagsinput('add', 'Name:'+this.$('.tt-input').val());
+        },
         refresh(){
           this.sendAction('onSearch', this.get('history').getSearchParams());
         },

http://git-wip-us.apache.org/repos/asf/ambari/blob/61d0f640/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/search-create-new-bar.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/search-create-new-bar.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/search-create-new-bar.hbs
index 33539ea..80f3b14 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/search-create-new-bar.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/search-create-new-bar.hbs
@@ -28,7 +28,7 @@
         </ul>
       </div>
       <input id="search-field" type="text" class="form-control" data-role="tagsinput" placeholder="Filter(Eg:name:workflowname)" aria-describedby="basic-addon1">
-      <div class="search-icon"><i class="fa fa-search"></i></div>
+      <div class="search-icon" {{action 'onSearchClicked'}}><i class="fa fa-search"></i></div>
     </div>
   </div>
   <div class='form-group date-picker'>


[05/50] [abbrv] ambari git commit: AMBARI-19558. hivemetastore-report.json.tmp permission errors in hive metastore, HS2 logs (dgrinenko via dlysnichenko)

Posted by nc...@apache.org.
AMBARI-19558. hivemetastore-report.json.tmp permission errors in hive metastore, HS2 logs (dgrinenko via dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/112cea45
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/112cea45
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/112cea45

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 112cea4506ad18b49aa56989f1b1db786565ee50
Parents: b4bb42a
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Tue Jan 17 11:55:56 2017 +0200
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Tue Jan 17 12:06:14 2017 +0200

----------------------------------------------------------------------
 .../2.5/services/HIVE/configuration/hivemetastore-site.xml    | 7 +------
 .../HIVE/configuration/hiveserver2-interactive-site.xml       | 7 +------
 .../HDP/2.5/services/HIVE/configuration/hiveserver2-site.xml  | 7 +------
 .../src/test/python/stacks/2.0.6/configs/default.json         | 2 +-
 .../test/python/stacks/2.0.6/configs/default_hive_nn_ha.json  | 2 +-
 .../python/stacks/2.0.6/configs/default_hive_nn_ha_2.json     | 2 +-
 .../python/stacks/2.0.6/configs/default_hive_non_hdfs.json    | 2 +-
 .../test/python/stacks/2.0.6/configs/default_no_install.json  | 2 +-
 .../src/test/python/stacks/2.0.6/configs/secured.json         | 2 +-
 9 files changed, 9 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/112cea45/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hivemetastore-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hivemetastore-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hivemetastore-site.xml
index bb248b3..1db1668 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hivemetastore-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hivemetastore-site.xml
@@ -35,7 +35,7 @@ limitations under the License.
   </property>
   <property>
     <name>hive.service.metrics.reporter</name>
-    <value>JSON_FILE, JMX, HADOOP2</value>
+    <value>HADOOP2</value>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
@@ -43,9 +43,4 @@ limitations under the License.
     <value>hivemetastore</value>
     <on-ambari-upgrade add="true"/>
   </property>
-  <property>
-    <name>hive.service.metrics.file.location</name>
-    <value>/var/log/hive/hivemetastore-report.json</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/112cea45/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hiveserver2-interactive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hiveserver2-interactive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hiveserver2-interactive-site.xml
index e8b6bc8..d3b4dfd 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hiveserver2-interactive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hiveserver2-interactive-site.xml
@@ -35,7 +35,7 @@ limitations under the License.
   </property>
   <property>
     <name>hive.service.metrics.reporter</name>
-    <value>JSON_FILE, JMX, HADOOP2</value>
+    <value>HADOOP2</value>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
@@ -44,11 +44,6 @@ limitations under the License.
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
-    <name>hive.service.metrics.file.location</name>
-    <value>/var/log/hive/hiveserver2Interactive-report.json</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
     <name>hive.async.log.enabled</name>
     <value>false</value>
     <description>Whether to enable Log4j2's asynchronous logging. Asynchronous logging can give significant performance improvement as logging will be handled in separate thread that uses LMAX disruptor queue for buffering log messages. Refer https://logging.apache.org/log4j/2.x/manual/async.html for benefits and drawbacks.</description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/112cea45/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hiveserver2-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hiveserver2-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hiveserver2-site.xml
index bd1821d..255598f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hiveserver2-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hiveserver2-site.xml
@@ -35,7 +35,7 @@ limitations under the License.
   </property>
   <property>
     <name>hive.service.metrics.reporter</name>
-    <value>JSON_FILE, JMX, HADOOP2</value>
+    <value>HADOOP2</value>
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
@@ -43,9 +43,4 @@ limitations under the License.
     <value>hiveserver2</value>
     <on-ambari-upgrade add="true"/>
   </property>
-  <property>
-    <name>hive.service.metrics.file.location</name>
-    <value>/var/log/hive/hiveserver2-report.json</value>
-    <on-ambari-upgrade add="true"/>
-  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/112cea45/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index f54b645..849b737 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -431,7 +431,7 @@
         },
         "hiveserver2-site": {
             "hive.metastore.metrics.enabled": "true",
-            "hive.service.metrics.reporter": "JSON_FILE, JMX, HADOOP2"
+            "hive.service.metrics.reporter": "HADOOP2"
         },
 		"ranger-hive-plugin-properties": {
             "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",

http://git-wip-us.apache.org/repos/asf/ambari/blob/112cea45/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
index be8866d..2b92cca 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
@@ -254,7 +254,7 @@
         },
         "hiveserver2-site": {
             "hive.metastore.metrics.enabled": "true",
-            "hive.service.metrics.reporter": "JSON_FILE, JMX, HADOOP2"
+            "hive.service.metrics.reporter": "HADOOP2"
         },
         "ranger-hive-plugin-properties": {
             "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/112cea45/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
index ede360b..acac36f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
@@ -256,7 +256,7 @@
         },
         "hiveserver2-site": {
             "hive.metastore.metrics.enabled": "true",
-            "hive.service.metrics.reporter": "JSON_FILE, JMX, HADOOP2"
+            "hive.service.metrics.reporter": "HADOOP2"
         },
         "ranger-hive-plugin-properties": {
             "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/112cea45/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
index d49868f..a02a874 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
@@ -420,7 +420,7 @@
         },
         "hiveserver2-site": {
             "hive.metastore.metrics.enabled": "true",
-            "hive.service.metrics.reporter": "JSON_FILE, JMX, HADOOP2"
+            "hive.service.metrics.reporter": "HADOOP2"
         },
 		"ranger-hive-plugin-properties": {
             "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/112cea45/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
index 5b8a0af..73c49a1 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
@@ -422,7 +422,7 @@
         },
         "hiveserver2-site": {
             "hive.metastore.metrics.enabled": "true",
-            "hive.service.metrics.reporter": "JSON_FILE, JMX, HADOOP2"
+            "hive.service.metrics.reporter": "HADOOP2"
         },
         "ranger-hive-plugin-properties": {
             "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/112cea45/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
index 890b9f1..3367e1b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
@@ -538,7 +538,7 @@
         },
         "hiveserver2-site": {
             "hive.metastore.metrics.enabled": "true",
-            "hive.service.metrics.reporter": "JSON_FILE, JMX, HADOOP2"
+            "hive.service.metrics.reporter": "HADOOP2"
         },
         "yarn-site": {
             "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", 


[38/50] [abbrv] ambari git commit: AMBARI-19600. AMS log4j rotation properties changes should be visible on ambari-upgrade (Madhuvanthi Radhakrishnan via smohanty)

Posted by nc...@apache.org.
AMBARI-19600. AMS log4j rotation properties changes should be visible on ambari-upgrade (Madhuvanthi Radhakrishnan via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/39174ea9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/39174ea9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/39174ea9

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 39174ea958150041957de9e7c65ba615da61483c
Parents: 410f294
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Tue Jan 17 18:07:23 2017 -0800
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Tue Jan 17 18:08:13 2017 -0800

----------------------------------------------------------------------
 .../server/upgrade/UpgradeCatalog250.java       |  27 ++
 .../0.1.0/configuration/ams-hbase-log4j.xml     |   8 +-
 .../0.1.0/configuration/ams-log4j.xml           |   4 +-
 .../server/upgrade/UpgradeCatalog250Test.java   | 346 +++++++++++++++++++
 4 files changed, 379 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/39174ea9/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
index 6638379..4e465c9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
@@ -59,6 +59,8 @@ public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
   protected static final String GROUP_TYPE_COL = "group_type";
   private static final String AMS_ENV = "ams-env";
   private static final String AMS_SITE = "ams-site";
+  private static final String AMS_LOG4J = "ams-log4j";
+  private static final String AMS_HBASE_LOG4J = "ams-hbase-log4j";
   private static final String AMS_MODE = "timeline.metrics.service.operation.mode";
   private static final String AMS_HBASE_SITE = "ams-hbase-site";
   private static final String HBASE_ROOTDIR = "hbase.rootdir";
@@ -313,6 +315,31 @@ public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
             }
           }
 
+          //Update AMS log4j to make rolling properties configurable as separate fields.
+          Config amsLog4jProperties = cluster.getDesiredConfigByType(AMS_LOG4J);
+          if(amsLog4jProperties != null){
+            Map<String, String> newProperties = new HashMap<>();
+
+            String content = amsLog4jProperties.getProperties().get("content");
+            content = SchemaUpgradeUtil.extractProperty(content,"ams_log_max_backup_size","ams_log_max_backup_size","log4j.appender.file.MaxFileSize=(\\w+)MB","80",newProperties);
+            content = SchemaUpgradeUtil.extractProperty(content,"ams_log_number_of_backup_files","ams_log_number_of_backup_files","log4j.appender.file.MaxBackupIndex=(\\w+)","60",newProperties);
+            newProperties.put("content",content);
+            updateConfigurationPropertiesForCluster(cluster,AMS_LOG4J,newProperties,true,true);
+          }
+
+          Config amsHbaseLog4jProperties = cluster.getDesiredConfigByType(AMS_HBASE_LOG4J);
+          if(amsHbaseLog4jProperties != null){
+            Map<String, String> newProperties = new HashMap<>();
+
+            String content = amsHbaseLog4jProperties.getProperties().get("content");
+            content = SchemaUpgradeUtil.extractProperty(content,"ams_hbase_log_maxfilesize","ams_hbase_log_maxfilesize","hbase.log.maxfilesize=(\\w+)MB","256",newProperties);
+            content = SchemaUpgradeUtil.extractProperty(content,"ams_hbase_log_maxbackupindex","ams_hbase_log_maxbackupindex","hbase.log.maxbackupindex=(\\w+)","20",newProperties);
+            content = SchemaUpgradeUtil.extractProperty(content,"ams_hbase_security_log_maxfilesize","ams_hbase_security_log_maxfilesize","hbase.security.log.maxfilesize=(\\w+)MB","256",newProperties);
+            content = SchemaUpgradeUtil.extractProperty(content,"ams_hbase_security_log_maxbackupindex","ams_hbase_security_log_maxbackupindex","hbase.security.log.maxbackupindex=(\\w+)","20",newProperties);
+            newProperties.put("content",content);
+            updateConfigurationPropertiesForCluster(cluster,AMS_HBASE_LOG4J,newProperties,true,true);
+          }
+
         }
       }
     }
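
SchemaUpgradeUtil.extractProperty is what lifts each hard-coded rotation value out of the log4j content and re-exposes it as a standalone property. A minimal sketch of that extract-and-template idea (JavaScript for illustration; the real helper is Java, takes separate property and template names, and its exact template syntax may differ):

    // Illustrative only: capture the current value, record it (or the
    // default) under propName, then swap the literal for a template ref.
    function extractProperty(content, propName, pattern, defaultValue, out) {
      var match = content.match(new RegExp(pattern));
      out[propName] = match ? match[1] : defaultValue;
      if (!match) { return content; }
      var templated = match[0].replace(match[1], '{{' + propName + '}}');
      return content.replace(match[0], templated);
    }

    // var props = {};
    // content = extractProperty(content, 'ams_log_max_backup_size',
    //     'log4j.appender.file.MaxFileSize=(\\w+)MB', '80', props);
    // props.ams_log_max_backup_size holds the value found in content
    // (default '80' if the line is absent), and the content line becomes
    // log4j.appender.file.MaxFileSize={{ams_log_max_backup_size}}MB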

http://git-wip-us.apache.org/repos/asf/ambari/blob/39174ea9/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-hbase-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-hbase-log4j.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-hbase-log4j.xml
index ab42034..5a97804 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-hbase-log4j.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-hbase-log4j.xml
@@ -28,7 +28,7 @@
     <value-attributes>
       <unit>MB</unit>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ams_hbase_log_maxbackupindex</name>
@@ -39,7 +39,7 @@
       <type>int</type>
       <minimum>0</minimum>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ams_hbase_security_log_maxfilesize</name>
@@ -49,7 +49,7 @@
     <value-attributes>
       <unit>MB</unit>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ams_hbase_security_log_maxbackupindex</name>
@@ -60,7 +60,7 @@
       <type>int</type>
       <minimum>0</minimum>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>content</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/39174ea9/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-log4j.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-log4j.xml
index 5782e7c..ba67ec5 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-log4j.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-log4j.xml
@@ -26,7 +26,7 @@
     <value-attributes>
       <unit>MB</unit>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ams_log_number_of_backup_files</name>
@@ -37,7 +37,7 @@
       <type>int</type>
       <minimum>0</minimum>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>content</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/39174ea9/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
index f531433..9e8da83 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
@@ -642,6 +642,352 @@ public class UpgradeCatalog250Test {
   }
 
   @Test
+  public void testAmsLog4jUpdateConfigs() throws Exception {
+    reset(clusters, cluster);
+    expect(clusters.getClusters()).andReturn(ImmutableMap.of("normal", cluster)).once();
+
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+
+    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
+    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
+            .addMockedMethod("createConfiguration")
+            .addMockedMethod("getClusters", new Class[] {})
+            .addMockedMethod("createConfig")
+            .withConstructor(actionManager, clusters, injector)
+            .createNiceMock();
+
+    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
+    expect(controller.getClusters()).andReturn(clusters).anyTimes();
+
+    Map<String, String> oldAmsLog4j = ImmutableMap.of(
+            "content",
+            "#\n" +
+                    "# Licensed to the Apache Software Foundation (ASF) under one\n" +
+                    "# or more contributor license agreements.  See the NOTICE file\n" +
+                    "# distributed with this work for additional information\n" +
+                    "# regarding copyright ownership.  The ASF licenses this file\n" +
+                    "# to you under the Apache License, Version 2.0 (the\n" +
+                    "# \"License\"); you may not use this file except in compliance\n" +
+                    "# with the License.  You may obtain a copy of the License at\n" +
+                    "#\n" +
+                    "#     http://www.apache.org/licenses/LICENSE-2.0\n" +
+                    "#\n" +
+                    "# Unless required by applicable law or agreed to in writing, software\n" +
+                    "# distributed under the License is distributed on an \"AS IS\" BASIS,\n" +
+                    "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" +
+                    "# See the License for the specific language governing permissions and\n" +
+                    "# limitations under the License.\n" +
+                    "#\n" +
+                    "\n" +
+                    "# Define some default values that can be overridden by system properties\n" +
+                    "ams.log.dir=.\n" +
+                    "ams.log.file=ambari-metrics-collector.log\n" +
+                    "\n" +
+                    "# Root logger option\n" +
+                    "log4j.rootLogger=INFO,file\n" +
+                    "\n" +
+                    "# Direct log messages to a log file\n" +
+                    "log4j.appender.file=org.apache.log4j.RollingFileAppender\n" +
+                    "log4j.appender.file.File=${ams.log.dir}/${ams.log.file}\n" +
+                    "log4j.appender.file.MaxFileSize=10MB\n" +
+                    "log4j.appender.file.MaxBackupIndex=12\n" +
+                    "log4j.appender.file.layout=org.apache.log4j.PatternLayout\n" +
+                    "log4j.appender.file.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n");
+
+    Map<String, String> expectedAmsLog4j = new HashMap<>();
+    expectedAmsLog4j.put("content","#\n" +
+                    "# Licensed to the Apache Software Foundation (ASF) under one\n" +
+                    "# or more contributor license agreements.  See the NOTICE file\n" +
+                    "# distributed with this work for additional information\n" +
+                    "# regarding copyright ownership.  The ASF licenses this file\n" +
+                    "# to you under the Apache License, Version 2.0 (the\n" +
+                    "# \"License\"); you may not use this file except in compliance\n" +
+                    "# with the License.  You may obtain a copy of the License at\n" +
+                    "#\n" +
+                    "#     http://www.apache.org/licenses/LICENSE-2.0\n" +
+                    "#\n" +
+                    "# Unless required by applicable law or agreed to in writing, software\n" +
+                    "# distributed under the License is distributed on an \"AS IS\" BASIS,\n" +
+                    "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" +
+                    "# See the License for the specific language governing permissions and\n" +
+                    "# limitations under the License.\n" +
+                    "#\n" +
+                    "\n" +
+                    "# Define some default values that can be overridden by system properties\n" +
+                    "ams.log.dir=.\n" +
+                    "ams.log.file=ambari-metrics-collector.log\n" +
+                    "\n" +
+                    "# Root logger option\n" +
+                    "log4j.rootLogger=INFO,file\n" +
+                    "\n" +
+                    "# Direct log messages to a log file\n" +
+                    "log4j.appender.file=org.apache.log4j.RollingFileAppender\n" +
+                    "log4j.appender.file.File=${ams.log.dir}/${ams.log.file}\n" +
+                    "log4j.appender.file.MaxFileSize={{ams_log_max_backup_size}}MB\n" +
+                    "log4j.appender.file.MaxBackupIndex={{ams_log_number_of_backup_files}}\n" +
+                    "log4j.appender.file.layout=org.apache.log4j.PatternLayout\n" +
+                    "log4j.appender.file.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n");
+    expectedAmsLog4j.put("ams_log_max_backup_size","10");
+    expectedAmsLog4j.put("ams_log_number_of_backup_files","12");
+
+
+    Config mockAmsLog4j = easyMockSupport.createNiceMock(Config.class);
+    expect(cluster.getDesiredConfigByType("ams-log4j")).andReturn(mockAmsLog4j).atLeastOnce();
+    expect(mockAmsLog4j.getProperties()).andReturn(oldAmsLog4j).anyTimes();
+    Capture<Map<String, String>> AmsLog4jCapture = EasyMock.newCapture();
+    expect(controller.createConfig(anyObject(Cluster.class), anyString(), capture(AmsLog4jCapture), anyString(),
+            anyObject(Map.class))).andReturn(config).once();
+
+    Map<String, String> oldAmsHbaseLog4j = ImmutableMap.of(
+            "content","# Licensed to the Apache Software Foundation (ASF) under one\n" +
+                    "# or more contributor license agreements.  See the NOTICE file\n" +
+                    "# distributed with this work for additional information\n" +
+                    "# regarding copyright ownership.  The ASF licenses this file\n" +
+                    "# to you under the Apache License, Version 2.0 (the\n" +
+                    "# \"License\"); you may not use this file except in compliance\n" +
+                    "# with the License.  You may obtain a copy of the License at\n" +
+                    "#\n" +
+                    "#     http://www.apache.org/licenses/LICENSE-2.0\n" +
+                    "#\n" +
+                    "# Unless required by applicable law or agreed to in writing, software\n" +
+                    "# distributed under the License is distributed on an \"AS IS\" BASIS,\n" +
+                    "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" +
+                    "# See the License for the specific language governing permissions and\n" +
+                    "# limitations under the License.\n" +
+                    "\n" +
+                    "\n" +
+                    "# Define some default values that can be overridden by system properties\n" +
+                    "hbase.root.logger=INFO,console\n" +
+                    "hbase.security.logger=INFO,console\n" +
+                    "hbase.log.dir=.\n" +
+                    "hbase.log.file=hbase.log\n" +
+                    "\n" +
+                    "# Define the root logger to the system property \"hbase.root.logger\".\n" +
+                    "log4j.rootLogger=${hbase.root.logger}\n" +
+                    "\n" +
+                    "# Logging Threshold\n" +
+                    "log4j.threshold=ALL\n" +
+                    "\n" +
+                    "#\n" +
+                    "# Daily Rolling File Appender\n" +
+                    "#\n" +
+                    "log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\n" +
+                    "log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}\n" +
+                    "\n" +
+                    "# Rollver at midnight\n" +
+                    "log4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n" +
+                    "\n" +
+                    "# 30-day backup\n" +
+                    "#log4j.appender.DRFA.MaxBackupIndex=30\n" +
+                    "log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n" +
+                    "\n" +
+                    "# Pattern format: Date LogLevel LoggerName LogMessage\n" +
+                    "log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n" +
+                    "\n" +
+                    "# Rolling File Appender properties\n" +
+                    "hbase.log.maxfilesize=256MB\n" +
+                    "hbase.log.maxbackupindex=20\n" +
+                    "\n" +
+                    "# Rolling File Appender\n" +
+                    "log4j.appender.RFA=org.apache.log4j.RollingFileAppender\n" +
+                    "log4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}\n" +
+                    "\n" +
+                    "log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}\n" +
+                    "log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}\n" +
+                    "\n" +
+                    "log4j.appender.RFA.layout=org.apache.log4j.PatternLayout\n" +
+                    "log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n" +
+                    "\n" +
+                    "#\n" +
+                    "# Security audit appender\n" +
+                    "#\n" +
+                    "hbase.security.log.file=SecurityAuth.audit\n" +
+                    "hbase.security.log.maxfilesize=256MB\n" +
+                    "hbase.security.log.maxbackupindex=20\n" +
+                    "log4j.appender.RFAS=org.apache.log4j.RollingFileAppender\n" +
+                    "log4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}\n" +
+                    "log4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}\n" +
+                    "log4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}\n" +
+                    "log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\n" +
+                    "log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n" +
+                    "log4j.category.SecurityLogger=${hbase.security.logger}\n" +
+                    "log4j.additivity.SecurityLogger=false\n" +
+                    "#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE\n" +
+                    "\n" +
+                    "#\n" +
+                    "# Null Appender\n" +
+                    "#\n" +
+                    "log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n" +
+                    "\n" +
+                    "#\n" +
+                    "# console\n" +
+                    "# Add \"console\" to rootlogger above if you want to use this\n" +
+                    "#\n" +
+                    "log4j.appender.console=org.apache.log4j.ConsoleAppender\n" +
+                    "log4j.appender.console.target=System.err\n" +
+                    "log4j.appender.console.layout=org.apache.log4j.PatternLayout\n" +
+                    "log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n" +
+                    "\n" +
+                    "# Custom Logging levels\n" +
+                    "\n" +
+                    "log4j.logger.org.apache.zookeeper=INFO\n" +
+                    "#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\n" +
+                    "log4j.logger.org.apache.hadoop.hbase=INFO\n" +
+                    "# Make these two classes INFO-level. Make them DEBUG to see more zk debug.\n" +
+                    "log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO\n" +
+                    "log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO\n" +
+                    "#log4j.logger.org.apache.hadoop.dfs=DEBUG\n" +
+                    "# Set this class to log INFO only otherwise its OTT\n" +
+                    "# Enable this to get detailed connection error/retry logging.\n" +
+                    "# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE\n" +
+                    "\n" +
+                    "\n" +
+                    "# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)\n" +
+                    "#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG\n" +
+                    "\n" +
+                    "# Uncomment the below if you want to remove logging of client region caching'\n" +
+                    "# and scan of .META. messages\n" +
+                    "# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO\n" +
+                    "# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO\n");
+
+    Map<String, String> expectedAmsHbaseLog4j = new HashMap<String,String>();
+    expectedAmsHbaseLog4j.put("content","# Licensed to the Apache Software Foundation (ASF) under one\n" +
+            "# or more contributor license agreements.  See the NOTICE file\n" +
+            "# distributed with this work for additional information\n" +
+            "# regarding copyright ownership.  The ASF licenses this file\n" +
+            "# to you under the Apache License, Version 2.0 (the\n" +
+            "# \"License\"); you may not use this file except in compliance\n" +
+            "# with the License.  You may obtain a copy of the License at\n" +
+            "#\n" +
+            "#     http://www.apache.org/licenses/LICENSE-2.0\n" +
+            "#\n" +
+            "# Unless required by applicable law or agreed to in writing, software\n" +
+            "# distributed under the License is distributed on an \"AS IS\" BASIS,\n" +
+            "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" +
+            "# See the License for the specific language governing permissions and\n" +
+            "# limitations under the License.\n" +
+            "\n" +
+            "\n" +
+            "# Define some default values that can be overridden by system properties\n" +
+            "hbase.root.logger=INFO,console\n" +
+            "hbase.security.logger=INFO,console\n" +
+            "hbase.log.dir=.\n" +
+            "hbase.log.file=hbase.log\n" +
+            "\n" +
+            "# Define the root logger to the system property \"hbase.root.logger\".\n" +
+            "log4j.rootLogger=${hbase.root.logger}\n" +
+            "\n" +
+            "# Logging Threshold\n" +
+            "log4j.threshold=ALL\n" +
+            "\n" +
+            "#\n" +
+            "# Daily Rolling File Appender\n" +
+            "#\n" +
+            "log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\n" +
+            "log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}\n" +
+            "\n" +
+            "# Rollver at midnight\n" +
+            "log4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n" +
+            "\n" +
+            "# 30-day backup\n" +
+            "#log4j.appender.DRFA.MaxBackupIndex=30\n" +
+            "log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n" +
+            "\n" +
+            "# Pattern format: Date LogLevel LoggerName LogMessage\n" +
+            "log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n" +
+            "\n" +
+            "# Rolling File Appender properties\n" +
+            "hbase.log.maxfilesize={{ams_hbase_log_maxfilesize}}MB\n" +
+            "hbase.log.maxbackupindex={{ams_hbase_log_maxbackupindex}}\n" +
+            "\n" +
+            "# Rolling File Appender\n" +
+            "log4j.appender.RFA=org.apache.log4j.RollingFileAppender\n" +
+            "log4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}\n" +
+            "\n" +
+            "log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}\n" +
+            "log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}\n" +
+            "\n" +
+            "log4j.appender.RFA.layout=org.apache.log4j.PatternLayout\n" +
+            "log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n" +
+            "\n" +
+            "#\n" +
+            "# Security audit appender\n" +
+            "#\n" +
+            "hbase.security.log.file=SecurityAuth.audit\n" +
+            "hbase.security.log.maxfilesize={{ams_hbase_security_log_maxfilesize}}MB\n" +
+            "hbase.security.log.maxbackupindex={{ams_hbase_security_log_maxbackupindex}}\n" +
+            "log4j.appender.RFAS=org.apache.log4j.RollingFileAppender\n" +
+            "log4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}\n" +
+            "log4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}\n" +
+            "log4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}\n" +
+            "log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\n" +
+            "log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n" +
+            "log4j.category.SecurityLogger=${hbase.security.logger}\n" +
+            "log4j.additivity.SecurityLogger=false\n" +
+            "#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE\n" +
+            "\n" +
+            "#\n" +
+            "# Null Appender\n" +
+            "#\n" +
+            "log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n" +
+            "\n" +
+            "#\n" +
+            "# console\n" +
+            "# Add \"console\" to rootlogger above if you want to use this\n" +
+            "#\n" +
+            "log4j.appender.console=org.apache.log4j.ConsoleAppender\n" +
+            "log4j.appender.console.target=System.err\n" +
+            "log4j.appender.console.layout=org.apache.log4j.PatternLayout\n" +
+            "log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n" +
+            "\n" +
+            "# Custom Logging levels\n" +
+            "\n" +
+            "log4j.logger.org.apache.zookeeper=INFO\n" +
+            "#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\n" +
+            "log4j.logger.org.apache.hadoop.hbase=INFO\n" +
+            "# Make these two classes INFO-level. Make them DEBUG to see more zk debug.\n" +
+            "log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO\n" +
+            "log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO\n" +
+            "#log4j.logger.org.apache.hadoop.dfs=DEBUG\n" +
+            "# Set this class to log INFO only otherwise its OTT\n" +
+            "# Enable this to get detailed connection error/retry logging.\n" +
+            "# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE\n" +
+            "\n" +
+            "\n" +
+            "# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)\n" +
+            "#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG\n" +
+            "\n" +
+            "# Uncomment the below if you want to remove logging of client region caching'\n" +
+            "# and scan of .META. messages\n" +
+            "# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO\n" +
+            "# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO\n");
+    expectedAmsHbaseLog4j.put("ams_hbase_log_maxfilesize","256");
+    expectedAmsHbaseLog4j.put("ams_hbase_log_maxbackupindex","20");
+    expectedAmsHbaseLog4j.put("ams_hbase_security_log_maxfilesize","256");
+    expectedAmsHbaseLog4j.put("ams_hbase_security_log_maxbackupindex","20");
+
+    Config mockAmsHbaseLog4j = easyMockSupport.createNiceMock(Config.class);
+    expect(cluster.getDesiredConfigByType("ams-hbase-log4j")).andReturn(mockAmsHbaseLog4j).atLeastOnce();
+    expect(mockAmsHbaseLog4j.getProperties()).andReturn(oldAmsHbaseLog4j).anyTimes();
+    Capture<Map<String, String>> AmsHbaseLog4jCapture = EasyMock.newCapture();
+    expect(controller.createConfig(anyObject(Cluster.class), anyString(), capture(AmsHbaseLog4jCapture), anyString(),
+            anyObject(Map.class))).andReturn(config).once();
+
+    replay(clusters, cluster);
+    replay(controller, injector2);
+    replay(mockAmsLog4j,mockAmsHbaseLog4j);
+    new UpgradeCatalog250(injector2).updateAMSConfigs();
+    easyMockSupport.verifyAll();
+
+    Map<String, String> updatedAmsLog4jProperties = AmsLog4jCapture.getValue();
+    assertTrue(Maps.difference(expectedAmsLog4j, updatedAmsLog4jProperties).areEqual());
+
+    Map<String, String> updatedAmsHbaseLog4jProperties = AmsHbaseLog4jCapture.getValue();
+    assertTrue(Maps.difference(expectedAmsHbaseLog4j, updatedAmsHbaseLog4jProperties).areEqual());
+
+  }
+
+  @Test
   public void testLogSearchUpdateConfigs() throws Exception {
     reset(clusters, cluster);
     expect(clusters.getClusters()).andReturn(ImmutableMap.of("normal", cluster)).once();


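The test above pins down the intended transformation for ams-log4j and ams-hbase-log4j: hard-coded MaxFileSize/MaxBackupIndex values are lifted out of the content template into new properties, and the template is rewritten to reference them as {{...}} placeholders. The UpgradeCatalog250 helper that performs this is not part of this diff; the sketch below, with hypothetical names, only mirrors the transformation the assertions expect for ams-log4j:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Hypothetical helper mirroring what the assertions above expect;
    // the real UpgradeCatalog250 code path is not part of this diff.
    public class AmsLog4jParameterizeSketch {
      public static Map<String, String> parameterize(String content) {
        Map<String, String> updated = new HashMap<>();
        Matcher size = Pattern.compile("MaxFileSize=(\\d+)MB").matcher(content);
        if (size.find()) {
          updated.put("ams_log_max_backup_size", size.group(1));          // "10"
          content = size.replaceFirst("MaxFileSize={{ams_log_max_backup_size}}MB");
        }
        Matcher index = Pattern.compile("MaxBackupIndex=(\\d+)").matcher(content);
        if (index.find()) {
          updated.put("ams_log_number_of_backup_files", index.group(1));  // "12"
          content = index.replaceFirst("MaxBackupIndex={{ams_log_number_of_backup_files}}");
        }
        updated.put("content", content);
        return updated;
      }
    }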
[35/50] [abbrv] ambari git commit: AMBARI-19541. Add log rotation settings - handle HDP upgrade scenario (Madhuvanthi Radhakrishnan via smohanty)

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/4dac2783/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
index 8bee56d..1e4da76 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml
@@ -75,6 +75,10 @@
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
       <parallel-scheduler/>
 
+      <service name="ATLAS">
+        <component>ATLAS_SERVER</component>
+      </service>
+
       <service name="FLUME">
         <component>FLUME_HANDLER</component>
       </service>
@@ -271,6 +275,13 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_yarn_audit_db"/>
       </execute-stage>
 
+      <!--Yarn-->
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Parameterizing Yarn Log4J Properties Resource Manager">
+        <task xsi:type="configure" id="yarn_log4j_parameterize">
+          <summary>Updating the Yarn Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!-- YARN -->
       <execute-stage service="YARN" component="NODEMANAGER" title="Add Spark2 shuffle">
         <task xsi:type="configure" id="hdp_2_5_0_0_add_spark2_yarn_shuffle"/>
@@ -294,6 +305,12 @@
         </task>
       </execute-stage>
 
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Apply config changes for Oozie Server">
+        <task xsi:type="configure" id="oozie_log4j_parameterize">
+          <summary>Updating the Oozie Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Fix Oozie admin users">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixOozieAdminUsers">
           <summary>Fix oozie admin users</summary>
@@ -305,6 +322,13 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_falcon_server_adjust_services_property"/>
       </execute-stage>
 
+      <!--FALCON-->
+      <execute-stage service="FALCON" component="FALCON_SERVER" title="Parameterizing Falcon Log4J Properties">
+        <task xsi:type="configure" id="falcon_log4j_parameterize">
+          <summary>Updating the Falcon Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!-- RANGER -->
       <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_flag"/>
@@ -333,6 +357,23 @@
         </task>
       </execute-stage>
 
+      <!--RANGER-->
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Parameterizing Ranger Admin Log4J Properties">
+        <task xsi:type="configure" id="admin_log4j_parameterize">
+          <summary>Updating the Ranger admin Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+      <execute-stage service="RANGER" component="RANGER_USERSYNC" title="Parameterizing Ranger Usersync Log4J Properties">
+        <task xsi:type="configure" id="usersync_log4j_parameterize">
+          <summary>Updating the Ranger usersync Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+      <execute-stage service="RANGER" component="RANGER_TAGSYNC" title="Parameterizing Ranger Tagsync Log4J Properties">
+        <task xsi:type="configure" id="tagsync_log4j_parameterize">
+          <summary>Updating the Ranger tagsync Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <execute-stage service="RANGER" component="RANGER_ADMIN" title="Configuring Ranger Alerts">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.RangerWebAlertConfigAction">
           <summary>Configuring Ranger Alerts</summary>
@@ -348,6 +389,12 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hdfs_audit_db"/>
       </execute-stage>
 
+      <execute-stage service="HDFS" component="NAMENODE" title="Parameterizing Hdfs Log4J Properties">
+        <task xsi:type="configure" id="hdfs_log4j_parameterize">
+          <summary>Updating the Hdfs Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!-- SQOOP -->
       <execute-stage service="SQOOP" component="SQOOP" title="Apply config changes for Sqoop to remove Atlas Configs">
         <!-- Remove Atlas configs that were incorrectly added to sqoop-site instead of Atlas' application.properties. -->
@@ -381,11 +428,30 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hbase_audit_db"/>
       </execute-stage>
 
+      <!--HBASE-->
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Parameterizing HBase Log4J Properties">
+        <task xsi:type="configure" id="hbase_log4j_parameterize">
+          <summary>Updating the Hbase Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!-- KNOX -->
       <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Apply config changes for Knox Gateway">
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_knox_audit_db"/>
       </execute-stage>
 
+      <!--KNOX-->
+      <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Parameterizing Knox Gateway Log4J Properties">
+        <task xsi:type="configure" id="knox_gateway_log4j_parameterize">
+          <summary>Updating the Knox Gateway Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+      <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Parameterizing Knox Ldap Log4J Properties">
+        <task xsi:type="configure" id="knox_ldap_log4j_parameterize">
+          <summary>Updating the Knox Ldap Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!-- STORM -->
       <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm Nimbus">
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_storm_audit_db"/>
@@ -418,18 +484,47 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kafka_audit_db"/>
       </execute-stage>
 
+      <!--KAFKA-->
+      <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Parameterizing Kafka Log4J Properties">
+        <task xsi:type="configure" id="kafka_log4j_parameterize">
+          <summary>Updating the Kafka Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!-- RANGER KMS -->
       <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Apply config changes for Ranger KMS Server">
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kms_audit_db"/>
       </execute-stage>
 
+      <!--RANGER-KMS-->
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Parameterizing Ranger Kms Log4J Properties">
+        <task xsi:type="configure" id="kms_log4j_parameterize">
+          <summary>Updating the KMS Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Calculating Ranger Properties">
         <condition xsi:type="security" type="kerberos"/>
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.RangerKmsProxyConfig">
           <summary>Adding Ranger proxy user properties</summary>
         </task>
       </execute-stage>
-    </group>
+
+      <!--ZOOKEEPER-->
+      <execute-stage service="ZOOKEEPER" component="ZOOKEEPER_SERVER" title="Parameterizing Zookeeper Log4J Properties">
+        <task xsi:type="configure" id="zookeeper_log4j_parameterize">
+          <summary>Updating the Zookeeper Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--ATLAS-->
+      <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Parameterizing Atlas Log4J Properties">
+        <task xsi:type="configure" id="atlas_log4j_parameterize">
+          <summary>Updating the Atlas Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+     </group>
 
     <!--
     After processing this group, the user-specified Kerberos descriptor will be updated to work with

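Worth noting for the express path: every <execute-stage> above is scoped to a service and component, so the new parameterization stages are simply skipped on clusters where that service is not installed. A toy Java model of that gating (the Set stands in for the cluster's installed services; none of this is Ambari API):

    import java.util.Set;

    // Toy model of execute-stage gating: a stage runs only when its service
    // is installed. The Set stands in for the cluster; not Ambari API.
    public class ExecuteStageSketch {
      static void stage(String service, String title, Set<String> installed) {
        if (installed.contains(service)) {
          System.out.println("running: " + title);
        } else {
          System.out.println("skipping (" + service + " not installed): " + title);
        }
      }

      public static void main(String[] args) {
        Set<String> installed = Set.of("HDFS", "ZOOKEEPER");
        stage("FALCON", "Parameterizing Falcon Log4J Properties", installed);
        stage("HDFS", "Parameterizing Hdfs Log4J Properties", installed);
      }
    }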
http://git-wip-us.apache.org/repos/asf/ambari/blob/4dac2783/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
index 23b2694..647cb45 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml
@@ -275,6 +275,11 @@
       <execute-stage service="YARN" component="RESOURCEMANAGER" title="Apply config changes for Yarn Resourcemanager">
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_yarn_audit_db"/>
       </execute-stage>
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Parameterizing Yarn Log4J Properties Resource Manager">
+        <task xsi:type="configure" id="yarn_log4j_parameterize">
+          <summary>Updating the Yarn Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
 
       <!-- YARN -->
       <execute-stage service="YARN" component="NODEMANAGER" title="Add Spark2 shuffle">
@@ -305,10 +310,21 @@
         </task>
       </execute-stage>
 
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Apply config changes for Oozie Server">
+        <task xsi:type="configure" id="oozie_log4j_parameterize">
+          <summary>Updating the Oozie Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
     <!--FALCON-->
       <execute-stage service="FALCON" component="FALCON_SERVER" title="Apply config changes for Falcon">
         <task xsi:type="configure" id="hdp_2_5_0_0_falcon_server_adjust_services_property"/>
       </execute-stage>
+      <execute-stage service="FALCON" component="FALCON_SERVER" title="Parameterizing Falcon Log4J Properties">
+        <task xsi:type="configure" id="falcon_log4j_parameterize">
+          <summary>Updating the Falcon Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
 
       <!-- RANGER -->
       <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
@@ -331,6 +347,22 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_set_external_solrCloud_flag"/>
       </execute-stage>
 
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Parameterizing Ranger Admin Log4J Properties">
+        <task xsi:type="configure" id="admin_log4j_parameterize">
+          <summary>Updating the Ranger admin Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+      <execute-stage service="RANGER" component="RANGER_USERSYNC" title="Parameterizing Ranger Usersync Log4J Properties">
+        <task xsi:type="configure" id="usersync_log4j_parameterize">
+          <summary>Updating the Ranger usersync Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+      <execute-stage service="RANGER" component="RANGER_TAGSYNC" title="Parameterizing Ranger Tagsync Log4J Properties">
+        <task xsi:type="configure" id="tagsync_log4j_parameterize">
+          <summary>Updating the Ranger tagsync Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <execute-stage service="RANGER" component="RANGER_ADMIN" title="Calculating Ranger Properties">
         <condition xsi:type="security" type="kerberos"/>
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.RangerKerberosConfigCalculation">
@@ -357,6 +389,13 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hdfs_audit_db"/>
       </execute-stage>
 
+      <!--HDFS-->
+      <execute-stage service="HDFS" component="NAMENODE" title="Parameterizing Hdfs Log4J Properties">
+        <task xsi:type="configure" id="hdfs_log4j_parameterize">
+          <summary>Updating the Hdfs Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!-- SQOOP -->
       <execute-stage service="SQOOP" component="SQOOP" title="Apply config changes for Sqoop to remove Atlas Configs">
         <!-- Remove Atlas configs that were incorrectly added to sqoop-site instead of Atlas' application.properties. -->
@@ -390,10 +429,28 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hbase_audit_db"/>
       </execute-stage>
 
+      <!--HBASE-->
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Parameterizing HBase Log4J Properties">
+        <task xsi:type="configure" id="hbase_log4j_parameterize">
+          <summary>Updating the Hbase Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!-- KNOX -->
       <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Apply config changes for Knox Gateway">
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_knox_audit_db"/>
       </execute-stage>
+      <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Parameterizing Knox Gateway Log4J Properties">
+        <task xsi:type="configure" id="knox_gateway_log4j_parameterize">
+          <summary>Updating the Knox Gateway Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+      <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Parameterizing Knox Ldap Log4J Properties">
+        <task xsi:type="configure" id="knox_ldap_log4j_parameterize">
+          <summary>Updating the Knox Ldap Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
 
       <!-- STORM -->
       <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm Nimbus">
@@ -427,18 +484,46 @@
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kafka_audit_db"/>
       </execute-stage>
 
+      <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Parameterizing Kafka Log4J Properties">
+        <task xsi:type="configure" id="kafka_log4j_parameterize">
+          <summary>Updating the Kafka Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+
       <!-- RANGER KMS -->
       <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Apply config changes for Ranger KMS Server">
         <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kms_audit_db"/>
       </execute-stage>
 
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Parameterizing Ranger Kms Log4J Properties">
+        <task xsi:type="configure" id="kms_log4j_parameterize">
+          <summary>Updating the KMS Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Calculating Ranger Properties">
         <condition xsi:type="security" type="kerberos"/>
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.RangerKmsProxyConfig">
           <summary>Adding Ranger proxy user properties</summary>
         </task>
       </execute-stage>
-    </group>
+
+      <!--ATLAS-->
+      <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Parameterizing Atlas Log4J Properties">
+        <task xsi:type="configure" id="atlas_log4j_parameterize">
+          <summary>Updating the Atlas Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--ZOOKEEPER-->
+      <execute-stage service="ZOOKEEPER" component="ZOOKEEPER_SERVER" title="Parameterizing Zookeeper Log4J Properties">
+        <task xsi:type="configure" id="zookeeper_log4j_parameterize">
+          <summary>Updating the Zookeeper Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+     </group>
 
     <!--
     After processing this group, the user-specified Kerberos descriptor will be updated to work with

http://git-wip-us.apache.org/repos/asf/ambari/blob/4dac2783/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
index e6f952d..fac26de 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.5.xml
@@ -543,6 +543,10 @@
   <processing>
     <service name="ZOOKEEPER">
       <component name="ZOOKEEPER_SERVER">
+        <pre-upgrade>
+          <task xsi:type="configure" id="zookeeper_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade/>
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
@@ -558,6 +562,7 @@
     <service name="RANGER">
       <component name="RANGER_ADMIN">
         <pre-upgrade>
+          <task xsi:type="configure" id="admin_log4j_parameterize" />
           <task xsi:type="execute" hosts="all">
             <summary>Stop Ranger Admin</summary>
             <script>scripts/ranger_admin.py</script>
@@ -606,6 +611,19 @@
       </component>
 
       <component name="RANGER_USERSYNC">
+        <pre-upgrade>
+          <task xsi:type="configure" id="usersync_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade/>
+        <upgrade>
+          <task xsi:type="restart-task" />
+        </upgrade>
+      </component>
+      <component name="RANGER_TAGSYNC">
+        <pre-upgrade>
+          <task xsi:type="configure" id="tagsync_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade />
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
@@ -616,7 +634,7 @@
       <component name="RANGER_KMS_SERVER">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kms_audit_db" />
-
+          <task xsi:type="configure" id="kms_log4j_parameterize" />
           <task xsi:type="execute" hosts="any" sequential="true">
             <summary>Upgrading Ranger KMS database schema</summary>
             <script>scripts/kms_server.py</script>
@@ -643,6 +661,7 @@
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kafka_audit_db" />
           <task xsi:type="configure" id="hdp_2_5_0_0_add_protocol_compatibility" />
+          <task xsi:type="configure" id="kafka_log4j_parameterize" />
         </pre-upgrade>
 
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
@@ -658,6 +677,7 @@
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_namenode_ha_adjustments"/>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hdfs_audit_db" />
+          <task xsi:type="configure" id="hdfs_log4j_parameterize" />
         </pre-upgrade>
 
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
@@ -737,6 +757,8 @@
             <summary>Calculating Yarn Properties for Spark Shuffle</summary>
           </task>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_yarn_audit_db" />
+          <task xsi:type="configure" id="yarn_log4j_parameterize" />
+
         </pre-upgrade>
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
 
@@ -768,6 +790,7 @@
       <component name="HBASE_MASTER">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hbase_audit_db" />
+          <task xsi:type="configure" id="hbase_log4j_parameterize" />
         </pre-upgrade>
         
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
@@ -912,6 +935,7 @@
     <service name="OOZIE">
       <component name="OOZIE_SERVER">
         <pre-upgrade>
+          <task xsi:type="configure" id="oozie_log4j_parameterize" />
           <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.OozieConfigCalculation">
             <summary>Adjusting Oozie properties</summary>
           </task>
@@ -965,6 +989,7 @@
       <component name="FALCON_SERVER">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_falcon_server_adjust_services_property"/>
+          <task xsi:type="configure" id="falcon_log4j_parameterize" />
         </pre-upgrade>
         <pre-downgrade/>
         <upgrade>
@@ -982,6 +1007,8 @@
       <component name="KNOX_GATEWAY">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_knox_audit_db" />
+          <task xsi:type="configure" id="knox_gateway_log4j_parameterize" />
+          <task xsi:type="configure" id="knox_ldap_log4j_parameterize" />
         </pre-upgrade>
 
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
@@ -1140,5 +1167,16 @@
         </upgrade>
       </component>
     </service>
+    <service name="ATLAS">
+      <component name="ATLAS_SERVER">
+        <pre-upgrade>
+          <task xsi:type="configure" id="atlas_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade />
+        <upgrade>
+          <task xsi:type="restart-task" />
+        </upgrade>
+      </component>
+    </service>
   </processing>
 </upgrade>

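In the rolling pack the same configure tasks move into <pre-upgrade>, and the empty <pre-downgrade/> elements alongside them are what keep the rewrites from re-running on downgrade, as the inline "no-op to prevent config changes on downgrade" comments spell out. A toy Java model of that direction gate, with illustrative names only:

    // Toy model of the <pre-upgrade> / empty <pre-downgrade/> pairing:
    // the configure task fires only when moving forward.
    public class DirectionGateSketch {
      enum Direction { UPGRADE, DOWNGRADE }

      static void preStage(String taskId, Direction direction) {
        if (direction == Direction.UPGRADE) {
          System.out.println("applying configure task: " + taskId);
        }
        // empty <pre-downgrade/>: nothing runs on downgrade
      }

      public static void main(String[] args) {
        preStage("zookeeper_log4j_parameterize", Direction.UPGRADE);
        preStage("zookeeper_log4j_parameterize", Direction.DOWNGRADE);
      }
    }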
http://git-wip-us.apache.org/repos/asf/ambari/blob/4dac2783/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
index dc21124..d75cb24 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.6.xml
@@ -548,6 +548,10 @@
   <processing>
     <service name="ZOOKEEPER">
       <component name="ZOOKEEPER_SERVER">
+        <pre-upgrade>
+          <task xsi:type="configure" id="zookeeper_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade />
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
@@ -574,6 +578,7 @@
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_ranger_admin_site" />
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_sso_property" />
           <task xsi:type="configure" id="hdp_2_5_0_0_set_external_solrCloud_flag"/>
+          <task xsi:type="configure" id="admin_log4j_parameterize" />
 
           <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.RangerKerberosConfigCalculation">
             <summary>Calculating Ranger Properties</summary>
@@ -612,6 +617,19 @@
       </component>
 
       <component name="RANGER_USERSYNC">
+        <pre-upgrade>
+          <task xsi:type="configure" id="usersync_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade />
+        <upgrade>
+          <task xsi:type="restart-task" />
+        </upgrade>
+      </component>
+      <component name="RANGER_TAGSYNC">
+        <pre-upgrade>
+          <task xsi:type="configure" id="tagsync_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade />
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
@@ -622,7 +640,7 @@
       <component name="RANGER_KMS_SERVER">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kms_audit_db" />
-
+          <task xsi:type="configure" id="kms_log4j_parameterize" />
           <task xsi:type="execute" hosts="any" sequential="true">
             <summary>Upgrading Ranger KMS database schema</summary>
             <script>scripts/kms_server.py</script>
@@ -649,6 +667,7 @@
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kafka_audit_db" />
           <task xsi:type="configure" id="hdp_2_5_0_0_add_protocol_compatibility" />
+          <task xsi:type="configure" id="kafka_log4j_parameterize" />
         </pre-upgrade>
 
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
@@ -664,6 +683,7 @@
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_namenode_ha_adjustments"/>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hdfs_audit_db" />
+          <task xsi:type="configure" id="hdfs_log4j_parameterize" />
         </pre-upgrade>
 
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
@@ -743,6 +763,7 @@
             <summary>Calculating Yarn Properties for Spark Shuffle</summary>
           </task>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_yarn_audit_db" />
+          <task xsi:type="configure" id="yarn_log4j_parameterize" />
         </pre-upgrade>
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
 
@@ -774,6 +795,7 @@
       <component name="HBASE_MASTER">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hbase_audit_db" />
+          <task xsi:type="configure" id="hbase_log4j_parameterize" />
         </pre-upgrade>
         
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
@@ -918,6 +940,7 @@
     <service name="OOZIE">
       <component name="OOZIE_SERVER">
         <pre-upgrade>
+          <task xsi:type="configure" id="oozie_log4j_parameterize" />
           <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.OozieConfigCalculation">
             <summary>Adjusting Oozie properties</summary>
           </task>
@@ -971,6 +994,7 @@
       <component name="FALCON_SERVER">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_falcon_server_adjust_services_property"/>
+          <task xsi:type="configure" id="falcon_log4j_parameterize" />
         </pre-upgrade>
         <pre-downgrade/>
         <upgrade>
@@ -988,6 +1012,8 @@
       <component name="KNOX_GATEWAY">
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_knox_audit_db" />
+          <task xsi:type="configure" id="knox_ldap_log4j_parameterize" />
+          <task xsi:type="configure" id="knox_gateway_log4j_parameterize" />
         </pre-upgrade>
 
         <pre-downgrade /> <!--  no-op to prevent config changes on downgrade -->
@@ -1146,5 +1172,17 @@
         </upgrade>
       </component>
     </service>
+    <service name="ATLAS">
+    <component name="ATLAS_SERVER">
+      <pre-upgrade>
+        <task xsi:type="configure" id="atlas_log4j_parameterize" />
+      </pre-upgrade>
+      <pre-downgrade />
+      <upgrade>
+        <task xsi:type="restart-task" />
+      </upgrade>
+    </component>
+    </service>
+
   </processing>
 </upgrade>

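The config-upgrade.xml diff below supplies the definitions that all of the configure tasks above reference by id. Two behaviours are worth spelling out: the ^ anchors in <regex-replace> only make sense if matching is applied per line (a MULTILINE assumption, not confirmed by this diff), and repeating a literal <replace> with the same find string, as the Ranger, Falcon and Atlas definitions do, stacks one inserted line after the anchor on each pass. A small Java illustration under those assumptions (this is not Ambari's ConfigureTask code):

    import java.util.regex.Pattern;

    // Illustrates <regex-replace> and chained literal <replace> semantics
    // under the assumptions stated above; not Ambari's ConfigureTask code.
    public class ConfigUpgradeReplaceSketch {
      public static void main(String[] args) {
        // ^-anchored <regex-replace>, assuming per-line (MULTILINE) matching.
        String zk = "log4j.appender.ROLLINGFILE.MaxFileSize=10MB\n";
        Pattern find = Pattern.compile(
            "^log4j\\.appender\\.ROLLINGFILE\\.MaxFileSize=([0-9]+)MB",
            Pattern.MULTILINE);
        System.out.println(find.matcher(zk).replaceAll(
            "log4j.appender.ROLLINGFILE.MaxFileSize={{zookeeper_log_max_backup_size}}MB"));

        // Two literal <replace> ops with the same find string: each pass
        // re-anchors on the appender line, so both params land after it.
        String anchor = "log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender";
        String admin = anchor;
        admin = admin.replace(anchor, anchor
            + "\nlog4j.appender.xa_log_appender.MaxFileSize={{ranger_xa_log_maxfilesize}}MB");
        admin = admin.replace(anchor, anchor
            + "\nlog4j.appender.xa_log_appender.MaxBackupIndex={{ranger_xa_log_maxbackupindex}}");
        System.out.println(admin);
      }
    }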
http://git-wip-us.apache.org/repos/asf/ambari/blob/4dac2783/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index 40052d8..d5dec43 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -56,7 +56,112 @@
         </changes>
       </component>
     </service>
-
+    <service name="ZOOKEEPER">
+          <component name="ZOOKEEPER_SERVER">
+            <changes>
+              <!-- Zookeeper Rolling properties for log4j need to be parameterized. -->
+              <definition xsi:type="configure" id="zookeeper_log4j_parameterize" summary="Parameterizing ZooKeeper Log4J Properties">
+                <type>zookeeper-log4j</type>
+                <set key="zookeeper_log_max_backup_size" value="10"/>
+                <set key="zookeeper_log_number_of_backup_files" value="10"/>
+                <regex-replace  key="content" find="^log4j.appender.ROLLINGFILE.MaxFileSize=([0-9]+)MB" replace-with="log4j.appender.ROLLINGFILE.MaxFileSize={{zookeeper_log_max_backup_size}}MB"/>
+                <regex-replace key="content" find="^#log4j.appender.ROLLINGFILE.MaxBackupIndex=([0-9]+)" replace-with="#log4j.appender.ROLLINGFILE.MaxBackupIndex={{zookeeper_log_number_of_backup_files}}"/>
+              </definition>
+            </changes>
+          </component>
+    </service>
+    <service name="ATLAS">
+      <component name="ATLAS_SERVER">
+        <changes>
+          <definition xsi:type="configure" id="atlas_log4j_parameterize" summary="Parameterizing Atlas Log4J Properties">
+            <type>atlas-log4j</type>
+            <set key="atlas_log_max_backup_size" value="256"/>
+            <set key="atlas_log_number_of_backup_files" value="20"/>
+            <replace key="content" find="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;\n&lt;param name=&quot;MaxFileSize&quot; value=&quot;{{atlas_log_max_backup_size}}MB&quot; /&gt;"/>
+            <replace key="content" find="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;\n&lt;param name=&quot;MaxFileSize&quot; value=&quot;{{atlas_log_number_of_backup_files}}&quot; /&gt;"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+   <service name="OOZIE">
+    <component name="OOZIE_SERVER">
+      <changes>
+        <!-- Oozie Rolling properties for log4j need to be parameterized. -->
+        <definition xsi:type="configure" id="oozie_log4j_parameterize" summary="Parameterizing Oozie Log4J Properties">
+          <type>oozie-log4j</type>
+          <set key="oozie_log_maxhistory" value="720"/>
+          <regex-replace key="content" find="^log4j.appender.oozie.RollingPolicy.MaxHistory=([0-9]+)" replace-with="log4j.appender.oozie.RollingPolicy.MaxHistory={{oozie_log_maxhistory}}"/>
+        </definition>
+      </changes>
+    </component>
+  </service>
+  <service name="YARN">
+    <component name="RESOURCEMANAGER">
+      <changes>
+        <!-- Yarn Rolling properties for log4j need to be parameterized. -->
+        <definition xsi:type="configure" id="yarn_log4j_parameterize" summary="Parameterizing Yarn Log4J Properties">
+          <type>yarn-log4j</type>
+          <set key="yarn_rm_summary_log_max_backup_size" value="256"/>
+          <set key="yarn_rm_summary_log_number_of_backup_files" value="20"/>
+          <regex-replace key="content" find="^log4j.appender.RMSUMMARY.MaxFileSize=([0-9]+)MB" replace-with="log4j.appender.RMSUMMARY.MaxFileSize={{yarn_rm_summary_log_max_backup_size}}MB"/>
+          <regex-replace key="content" find="^log4j.appender.RMSUMMARY.MaxBackupIndex=([0-9]+)" replace-with="log4j.appender.RMSUMMARY.MaxBackupIndex={{yarn_rm_summary_log_number_of_backup_files}}"/>
+        </definition>
+      </changes>
+    </component>
+  </service>
+  <service name="HDFS">
+    <component name="NAMENODE">
+      <changes>
+        <!-- HDFS Rolling properties for log4j need to be parameterized. -->
+        <definition xsi:type="configure" id="hdfs_log4j_parameterize" summary="Parameterizing Hdfs Log4J Properties">
+          <type>hdfs-log4j</type>
+          <set key="hadoop_log_max_backup_size" value="256"/>
+          <set key="hadoop_log_number_of_backup_files" value="10"/>
+          <set key="hadoop_security_log_max_backup_size" value="256"/>
+          <set key="hadoop_security_log_number_of_backup_files" value="20"/>
+          <regex-replace  key="content" find="log4j.appender.RFA.MaxFileSize=([0-9]+)MB" replace-with="log4j.appender.RFA.MaxFileSize={{hadoop_log_max_backup_size}}MB"/>
+          <regex-replace  key="content" find="log4j.appender.RFA.MaxBackupIndex=([0-9]+)" replace-with="log4j.appender.RFA.MaxBackupIndex={{hadoop_log_number_of_backup_files}}"/>
+          <regex-replace  key="content" find="hadoop.security.log.maxfilesize=([0-9]+)MB" replace-with="hadoop.security.log.maxfilesize={{hadoop_security_log_max_backup_size}}MB"/>
+          <regex-replace  key="content" find="hadoop.security.log.maxbackupindex=([0-9]+)" replace-with="hadoop.security.log.maxbackupindex={{hadoop_security_log_number_of_backup_files}}"/>
+        </definition>
+      </changes>
+    </component>
+  </service>
+  <service name="HBASE">
+    <component name="HBASE_MASTER">
+    <changes>
+    <!-- HBase Rolling properties for log4j need to be parameterized. -->
+      <definition xsi:type="configure" id="hbase_log4j_parameterize" summary="Parameterizing HBase Log4J Properties">
+          <type>hbase-log4j</type>
+          <set key="hbase_log_maxfilesize" value="256"/>
+          <set key="hbase_log_maxbackupindex" value="20"/>
+          <set key="hbase_security_log_maxfilesize" value="256"/>
+          <set key="hbase_security_log_maxbackupindex" value="20"/>
+          <regex-replace key="content" find="hbase.log.maxfilesize=([0-9]+)MB" replace-with="hbase.log.maxfilesize={{hbase_log_maxfilesize}}MB"/>
+          <regex-replace key="content" find="hbase.log.maxbackupindex=([0-9]+)" replace-with="hbase.log.maxbackupindex={{hbase_log_maxbackupindex}}"/>
+          <regex-replace key="content" find="hbase.security.log.maxfilesize=([0-9]+)MB" replace-with="hbase.security.log.maxfilesize={{hbase_security_log_maxfilesize}}MB"/>
+          <regex-replace key="content" find="hbase.security.log.maxbackupindex=([0-9]+)" replace-with="hbase.security.log.maxbackupindex={{hbase_security_log_maxbackupindex}}"/>
+      </definition>
+    </changes>
+    </component>
+  </service>
+  <service name="FALCON">
+      <component name="FALCON_SERVER">
+        <changes>
+          <definition xsi:type="configure" id="falcon_log4j_parameterize" summary="Parameterizing Falcon Log4J Properties">
+          <type>falcon-log4j</type>
+          <set key="falcon_log_maxfilesize" value="256"/>
+          <set key="falcon_log_maxbackupindex" value="20"/>
+          <set key="falcon_security_log_maxfilesize" value="256"/>
+          <set key="falcon_security_log_maxbackupindex" value="20"/>
+          <replace key="content" find="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;&#xA;&lt;param name=&quot;MaxFileSize&quot; value=&quot;{{falcon_log_maxfilesize}}MB&quot; /&gt;"/>
+          <replace key="content" find="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;FILE&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;&#xA;&lt;param name=&quot;MaxBackupIndex&quot; value=&quot;{{falcon_log_maxbackupindex}}&quot; /&gt;"/>
+          <replace key="content" find="&lt;appender name=&quot;SECURITY&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;SECURITY&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;&#xA;&lt;param name=&quot;MaxFileSize&quot; value=&quot;{{falcon_security_log_maxfilesize}}MB&quot;/&gt;"/>
+          <replace key="content" find="&lt;appender name=&quot;SECURITY&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;" replace-with="&lt;appender name=&quot;SECURITY&quot; class=&quot;org.apache.log4j.DailyRollingFileAppender&quot;&gt;&#xA;&lt;param name=&quot;MaxBackupIndex&quot; value=&quot;{{falcon_security_log_maxbackupindex}}&quot;/&gt;"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
     <service name="RANGER">
       <component name="RANGER_ADMIN">
         <changes>
@@ -64,9 +169,92 @@
             <type>ranger-env</type>
             <transfer operation="delete" delete-key="bind_anonymous" />
           </definition>
+          <definition xsi:type="configure" id="admin_log4j_parameterize" summary="Parameterizing Ranger Log4J Properties">
+            <type>admin-log4j</type>
+            <set key="ranger_xa_log_maxfilesize" value="256"/>
+            <set key="ranger_xa_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.xa_log_appender.MaxFileSize={{ranger_xa_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.xa_log_appender.MaxBackupIndex={{ranger_xa_log_maxbackupindex}}"/>
+          </definition>
+        </changes>
+      </component>
+      <component name="RANGER_USERSYNC">
+        <changes>
+          <definition xsi:type="configure" id="usersync_log4j_parameterize" summary="Parameterizing Ranger Usersync Log4J Properties">
+            <type>usersync-log4j</type>
+            <set key="ranger_usersync_log_maxfilesize" value="256"/>
+            <set key="ranger_usersync_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxFileSize = {{ranger_usersync_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxBackupIndex = {{ranger_usersync_log_maxbackupindex}}"/>
+          </definition>
+        </changes>
+      </component>
+      <component name="RANGER_TAGSYNC">
+        <changes>
+          <definition xsi:type="configure" id="tagsync_log4j_parameterize" summary="Parameterizing Ranger Tagsync Log4J Properties">
+            <type>tagsync-log4j</type>
+            <set key="ranger_tagsync_log_maxfilesize" value="256"/>
+            <set key="ranger_tagsync_log_number_of_backup_files" value="20"/>
+            <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxFileSize = {{ranger_tagsync_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxBackupIndex = {{ranger_tagsync_log_number_of_backup_files}}"/>
+          </definition>
         </changes>
       </component>
     </service>
+    <service name="RANGER_KMS">
+    <component name="RANGER_KMS_SERVER">
+    <changes>
+      <definition xsi:type="configure" id="kms_log4j_parameterize" summary="Parameterizing Ranger KMS Log4J Properties">
+        <type>kms-log4j</type>
+        <set key="ranger_kms_log_maxfilesize" value="256"/>
+        <set key="ranger_kms_log_maxbackupindex" value="20"/>
+        <set key="ranger_kms_audit_log_maxfilesize" value="256"/>
+        <set key="ranger_kms_audit_log_maxbackupindex" value="20"/>
+        <replace key="content" find="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms.MaxFileSize = {{ranger_kms_log_maxfilesize}}MB"/>
+        <replace key="content" find="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms.MaxBackupIndex = {{ranger_kms_log_maxbackupindex}}"/>
+        <replace key="content" find="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms-audit.MaxFileSize = {{ranger_kms_audit_log_maxfilesize}}MB"/>
+        <replace key="content" find="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms-audit.MaxBackupIndex = {{ranger_kms_audit_log_maxbackupindex}}"/>
+      </definition>
+    </changes>
+    </component>
+    </service>
+    <service name="KAFKA">
+    <component name="KAFKA_BROKER">
+    <changes>
+      <definition xsi:type="configure" id="kafka_log4j_parameterize" summary="Parameterizing Kafka Log4J Properties">
+        <type>kafka-log4j</type>
+        <set key="kafka_log_maxfilesize" value="256"/>
+        <set key="kafka_log_maxbackupindex" value="20"/>
+        <set key="controller_log_maxfilesize" value="256"/>
+        <set key="controller_log_maxbackupindex" value="20"/>
+        <replace key="content" find="log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kafkaAppender.MaxFileSize = {{kafka_log_maxfilesize}}MB"/>
+        <replace key="content" find="log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kafkaAppender.MaxBackupIndex = {{kafka_log_maxbackupindex}}"/>
+        <replace key="content" find="log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.controllerAppender.MaxFileSize = {{controller_log_maxfilesize}}MB"/>
+        <replace key="content" find="log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.controllerAppender.MaxBackupIndex = {{controller_log_maxbackupindex}}"/>
+      </definition>
+    </changes>
+    </component>
+    </service>
+    <service name="KNOX">
+    <component name="KNOX_GATEWAY">
+    <changes>
+      <definition xsi:type="configure" id="knox_gateway_log4j_parameterize" summary="Parameterizing Knox Gateway Log4J Properties">
+        <type>gateway-log4j</type>
+        <set key="knox_gateway_log_maxfilesize" value="256"/>
+        <set key="knox_gateway_log_maxbackupindex" value="20"/>
+        <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxFileSize = {{knox_gateway_log_maxfilesize}}MB"/>
+        <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxBackupIndex = {{knox_gateway_log_maxbackupindex}}"/>
+        </definition>
+      <definition xsi:type="configure" id="knox_ldap_log4j_parameterize" summary="Parameterizing Knox Ldap Log4J Properties">
+        <type>ldap-log4j</type>
+        <set key="knox_ldap_log_maxfilesize" value="256"/>
+        <set key="knox_ldap_log_maxbackupindex" value="20"/>
+        <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxFileSize = {{knox_ldap_log_maxfilesize}}MB"/>
+        <replace key="content" find="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.drfa.MaxBackupIndex = {{knox_ldap_log_maxbackupindex}}"/>
+      </definition>
+    </changes>
+    </component>
+    </service>
 
   </services>
 </upgrade-config-changes>
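
A note on the configure definitions above: each regex-replace entry pairs a capture-group find pattern with a {{token}} replacement, so whatever numeric value is currently in the log4j content gets swapped for a parameter, and the matching <set> entry seeds that parameter's key in the config type. A minimal, self-contained Java sketch of the matching step (class and string names are illustrative only, not Ambari's actual ConfigureTask implementation):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Illustrative sketch: shows how a capture-group find pattern from a
    // regex-replace definition rewrites an existing log4j line into its
    // parameterized form. Not Ambari's real upgrade code.
    public class RegexReplaceSketch {
      public static void main(String[] args) {
        String content = "log4j.appender.RFA.MaxFileSize=64MB";
        // ([0-9]+) matches whatever size is currently configured.
        Pattern find = Pattern.compile("log4j\\.appender\\.RFA\\.MaxFileSize=([0-9]+)MB");
        // The whole match is replaced by the token form; Ambari later
        // resolves {{hadoop_log_max_backup_size}} from the key added via <set>.
        String updated = find.matcher(content).replaceAll(
            Matcher.quoteReplacement("log4j.appender.RFA.MaxFileSize={{hadoop_log_max_backup_size}}MB"));
        System.out.println(updated);
      }
    }

Run against a full hdfs-log4j content blob, every line that matches is rewritten and everything else is left alone, which is why the definitions can safely parameterize locally customized values.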

http://git-wip-us.apache.org/repos/asf/ambari/blob/4dac2783/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
index 09608a0..5ef959b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
@@ -285,6 +285,13 @@
         </task>
       </execute-stage>
 
+      <!--Yarn-->
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Parameterizing Yarn Log4J Properties Resource Manager">
+        <task xsi:type="configure" id="yarn_log4j_parameterize">
+          <summary>Updating the Yarn Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
       <!--TEZ-->
       <execute-stage service="TEZ" component="TEZ_CLIENT" title="Verify LZO codec path for Tez">
         <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath">
@@ -298,6 +305,39 @@
           <summary>Adjusting Oozie properties</summary>
         </task>
       </execute-stage>
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Parameterizing Oozie Log4J Properties">
+        <task xsi:type="configure" id="oozie_log4j_parameterize">
+          <summary>Updating the Oozie Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--HDFS-->
+      <execute-stage service="HDFS" component="NAMENODE" title="Parameterizing Hdfs Log4J Properties">
+        <task xsi:type="configure" id="hdfs_log4j_parameterize">
+          <summary>Updating the Hdfs Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--HBASE-->
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Parameterizing HBase Log4J Properties">
+        <task xsi:type="configure" id="hbase_log4j_parameterize">
+          <summary>Updating the Hbase Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--ZOOKEEPER-->
+      <execute-stage service="ZOOKEEPER" component="ZOOKEEPER_SERVER" title="Parameterizing Zookeeper Log4J Properties">
+        <task xsi:type="configure" id="zookeeper_log4j_parameterize">
+          <summary>Updating the Zookeeper Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--FALCON-->
+      <execute-stage service="FALCON" component="FALCON_SERVER" title="Parameterizing Falcon Log4J Properties">
+        <task xsi:type="configure" id="falcon_log4j_parameterize">
+          <summary>Updating the Falcon Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
 
       <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus">
         <task xsi:type="configure" id="increase_storm_zookeeper_timeouts"/>
@@ -307,6 +347,61 @@
       <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
         <task xsi:type="configure" id="hdp_2_6_0_0_remove_bind_anonymous"/>
       </execute-stage>
+
+      <!--RANGER-->
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Parameterizing Ranger Admin Log4J Properties">
+        <task xsi:type="configure" id="admin_log4j_parameterize">
+          <summary>Updating the Ranger admin Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+      <execute-stage service="RANGER" component="RANGER_USERSYNC" title="Parameterizing Ranger Usersync Log4J Properties">
+        <task xsi:type="configure" id="usersync_log4j_parameterize">
+          <summary>Updating the Ranger usersync Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+      <execute-stage service="RANGER" component="RANGER_TAGSYNC" title="Parameterizing Ranger Tagsync Log4J Properties">
+        <task xsi:type="configure" id="tagsync_log4j_parameterize">
+          <summary>Updating the Ranger tagsync Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--RANGER-KMS-->
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Parameterizing Ranger Kms Log4J Properties">
+        <task xsi:type="configure" id="kms_log4j_parameterize">
+          <summary>Updating the KMS Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--ATLAS-->
+      <execute-stage service="ATLAS" component="ATLAS_SERVER" title="Parameterizing Atlas Log4J Properties">
+        <task xsi:type="configure" id="atlas_log4j_parameterize">
+          <summary>Updating the Atlas Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--KAFKA-->
+      <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Parameterizing Kafka Log4J Properties">
+        <task xsi:type="configure" id="kafka_log4j_parameterize">
+          <summary>Updating the Kafka Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--KNOX-->
+      <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Parameterizing Knox Gateway Log4J Properties">
+        <task xsi:type="configure" id="knox_gateway_log4j_parameterize">
+          <summary>Updating the Knox Gateway Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+      <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Parameterizing Knox Ldap Log4J Properties">
+        <task xsi:type="configure" id="knox_ldap_log4j_parameterize">
+          <summary>Updating the Knox Ldap Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <!--STORM-->
+      <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus">
+        <task xsi:type="configure" id="increase_storm_zookeeper_timeouts"/>
+      </execute-stage>
     </group>
 
     <!--

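Worth noting about the express-upgrade wiring above: each execute-stage only binds a configure task to a definition of the same id in the config-upgrade file, and the {{token}} placeholders those definitions write are later filled in from the keys they <set>. A hedged sketch of that resolution step as plain template substitution (illustrative only, not Ambari's actual templating code):

    import java.util.Map;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Illustrative token resolution: replaces {{key}} placeholders with
    // values from a config map, leaving unknown tokens in place.
    public final class TokenResolveSketch {
      private static final Pattern TOKEN = Pattern.compile("\\{\\{(\\w+)\\}\\}");

      static String resolve(String content, Map<String, String> config) {
        Matcher m = TOKEN.matcher(content);
        StringBuffer out = new StringBuffer();
        while (m.find()) {
          // Fall back to leaving the token untouched if the key is missing.
          String value = config.getOrDefault(m.group(1), m.group(0));
          m.appendReplacement(out, Matcher.quoteReplacement(value));
        }
        m.appendTail(out);
        return out.toString();
      }

      public static void main(String[] args) {
        String line = "log4j.appender.RFA.MaxFileSize={{hadoop_log_max_backup_size}}MB";
        System.out.println(resolve(line, Map.of("hadoop_log_max_backup_size", "256")));
        // Prints: log4j.appender.RFA.MaxFileSize=256MB
      }
    }
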
http://git-wip-us.apache.org/repos/asf/ambari/blob/4dac2783/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index 949a174..b13a6f0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -464,6 +464,10 @@
   <processing>
     <service name="ZOOKEEPER">
       <component name="ZOOKEEPER_SERVER">
+        <pre-upgrade>
+          <task xsi:type="configure" id="zookeeper_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade />
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
@@ -479,6 +483,7 @@
     <service name="RANGER">
       <component name="RANGER_ADMIN">
         <pre-upgrade>
+          <task xsi:type="configure" id="admin_log4j_parameterize" />
           <task xsi:type="configure" id="hdp_2_6_0_0_remove_bind_anonymous"/>
           <task xsi:type="execute" hosts="all">
             <summary>Stop Ranger Admin</summary>
@@ -515,6 +520,10 @@
       </component>
 
       <component name="RANGER_USERSYNC">
+        <pre-upgrade>
+          <task xsi:type="configure" id="usersync_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade />
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
@@ -523,6 +532,7 @@
       <component name="RANGER_TAGSYNC">
 
         <pre-upgrade>
+          <task xsi:type="configure" id="tagsync_log4j_parameterize" />
           <task xsi:type="execute" hosts="all">
             <script>scripts/ranger_tagsync.py</script>
             <function>configure_atlas_user_for_tagsync</function>
@@ -540,6 +550,7 @@
     <service name="RANGER_KMS">
       <component name="RANGER_KMS_SERVER">
         <pre-upgrade>
+          <task xsi:type="configure" id="kms_log4j_parameterize" />
           <task xsi:type="execute" hosts="any" sequential="true">
             <summary>Upgrading Ranger KMS database schema</summary>
             <script>scripts/kms_server.py</script>
@@ -563,6 +574,10 @@
 
     <service name="KAFKA">
       <component name="KAFKA_BROKER">
+        <pre-upgrade>
+          <task xsi:type="configure" id="kafka_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade />
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
@@ -571,6 +586,10 @@
 
     <service name="HDFS">
       <component name="NAMENODE">
+        <pre-upgrade>
+          <task xsi:type="configure" id="hdfs_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade />
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
@@ -645,6 +664,10 @@
       </component>
 
       <component name="RESOURCEMANAGER">
+        <pre-upgrade>
+          <task xsi:type="configure" id="yarn_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade />
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
@@ -665,6 +688,10 @@
 
     <service name="HBASE">
       <component name="HBASE_MASTER">
+        <pre-upgrade>
+          <task xsi:type="configure" id="hbase_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade />
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
@@ -807,6 +834,7 @@
     <service name="OOZIE">
       <component name="OOZIE_SERVER">
         <pre-upgrade>
+          <task xsi:type="configure" id="oozie_log4j_parameterize" />
           <task xsi:type="execute" hosts="all" sequential="true">
             <summary>Shut down all Oozie servers</summary>
             <script>scripts/oozie_server.py</script>
@@ -855,6 +883,10 @@
 
     <service name="FALCON">
       <component name="FALCON_SERVER">
+        <pre-upgrade>
+          <task xsi:type="configure" id="falcon_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade />
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
@@ -868,6 +900,11 @@
 
     <service name="KNOX">
       <component name="KNOX_GATEWAY">
+        <pre-upgrade>
+          <task xsi:type="configure" id="knox_gateway_log4j_parameterize" />
+          <task xsi:type="configure" id="knox_ldap_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade />
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>
@@ -940,6 +977,10 @@
 
     <service name="ATLAS">
       <component name="ATLAS_SERVER">
+        <pre-upgrade>
+          <task xsi:type="configure" id="atlas_log4j_parameterize" />
+        </pre-upgrade>
+        <pre-downgrade />
         <upgrade>
           <task xsi:type="restart-task"/>
         </upgrade>

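The rolling-upgrade changes above all follow one pattern: a component gains a <pre-upgrade> block carrying its *_log4j_parameterize configure task, paired with an explicitly empty <pre-downgrade />, so the parameterization runs only while moving forward and a downgrade simply restarts the component without rewriting its log4j config. A tiny sketch of that direction-dependent task selection (enum and field names are illustrative, not Ambari's orchestration API):

    import java.util.List;

    // Illustrative only: models how an empty pre-downgrade list makes the
    // configure task a forward-only step.
    public class DirectionSketch {
      enum Direction { UPGRADE, DOWNGRADE }

      private final List<String> preUpgrade = List.of("hdfs_log4j_parameterize");
      private final List<String> preDowngrade = List.of(); // empty <pre-downgrade />

      List<String> tasksFor(Direction d) {
        return d == Direction.UPGRADE ? preUpgrade : preDowngrade;
      }

      public static void main(String[] args) {
        DirectionSketch s = new DirectionSketch();
        System.out.println(s.tasksFor(Direction.UPGRADE));   // [hdfs_log4j_parameterize]
        System.out.println(s.tasksFor(Direction.DOWNGRADE)); // []
      }
    }
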
http://git-wip-us.apache.org/repos/asf/ambari/blob/4dac2783/ambari-server/src/main/resources/upgrade-config.xsd
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade-config.xsd b/ambari-server/src/main/resources/upgrade-config.xsd
index e274451..805b472 100644
--- a/ambari-server/src/main/resources/upgrade-config.xsd
+++ b/ambari-server/src/main/resources/upgrade-config.xsd
@@ -88,7 +88,19 @@
             <xs:attribute name="if-type" use="optional" type="xs:string"/>
             <xs:attribute name="if-value" use="optional" type="xs:string"/>
             <xs:attribute name="if-key-state" use="optional" type="set-if-key-state-type"/>
-            <xs:attribute name="mask" use="optional" type="xs:boolean"/>            
+            <xs:attribute name="mask" use="optional" type="xs:boolean"/>
+          </xs:complexType>
+        </xs:element>
+        <xs:element name="regex-replace" minOccurs="0" maxOccurs="unbounded">
+          <xs:complexType>
+            <xs:attribute name="key" use="required" type="xs:string"/>
+            <xs:attribute name="find" use="required" type="xs:string"/>
+            <xs:attribute name="replace-with" use="required" type="xs:string"/>
+            <xs:attribute name="if-key" use="optional" type="xs:string"/>
+            <xs:attribute name="if-type" use="optional" type="xs:string"/>
+            <xs:attribute name="if-value" use="optional" type="xs:string"/>
+            <xs:attribute name="if-key-state" use="optional" type="set-if-key-state-type"/>
+            <xs:attribute name="mask" use="optional" type="xs:boolean"/>
           </xs:complexType>
         </xs:element>
       </xs:choice>

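Schema-wise, the new regex-replace element mirrors the existing replace element attribute-for-attribute (key, find, replace-with, the optional if-* guards, and mask); the difference is that find is treated as a regular expression. One quick, hedged way to sanity-check a config-upgrade file against the extended schema using the JDK's built-in validator (the file paths below are placeholders):

    import java.io.File;
    import javax.xml.XMLConstants;
    import javax.xml.transform.stream.StreamSource;
    import javax.xml.validation.SchemaFactory;
    import javax.xml.validation.Validator;

    // Placeholder paths; point these at the real upgrade-config.xsd and a
    // config-upgrade.xml to verify that each regex-replace entry carries
    // all required attributes before shipping an upgrade pack.
    public class ValidateUpgradeConfig {
      public static void main(String[] args) throws Exception {
        Validator validator = SchemaFactory
            .newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI)
            .newSchema(new File("upgrade-config.xsd"))
            .newValidator();
        // Throws SAXException with a line/column pointer on the first violation.
        validator.validate(new StreamSource(new File("config-upgrade.xml")));
        System.out.println("schema-valid");
      }
    }
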

[24/50] [abbrv] ambari git commit: AMBARI-19337. Ambari has some spelling mistakes in YARN proxyuser properties in many places (Jay SenSharma via smohanty)

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json.orig
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json.orig b/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json.orig
new file mode 100644
index 0000000..bcc5359
--- /dev/null
+++ b/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json.orig
@@ -0,0 +1,1320 @@
+{
+  "identities": [{
+    "principal": {
+      "type": "service",
+      "value": "HTTP/_HOST@${realm}"
+    },
+    "name": "spnego",
+    "keytab": {
+      "owner": {
+        "access": "r",
+        "name": "root"
+      },
+      "file": "${keytab_dir}/spnego.service.keytab",
+      "group": {
+        "access": "r",
+        "name": "${cluster-env/user_group}"
+      }
+    }
+  }, {
+    "principal": {
+      "configuration": "cluster-env/smokeuser_principal_name",
+      "type": "user",
+      "local_username": "${cluster-env/smokeuser}",
+      "value": "${cluster-env/smokeuser}-${cluster_name|toLower()}@${realm}"
+    },
+    "name": "smokeuser",
+    "keytab": {
+      "owner": {
+        "access": "r",
+        "name": "${cluster-env/smokeuser}"
+      },
+      "file": "${keytab_dir}/smokeuser.headless.keytab",
+      "configuration": "cluster-env/smokeuser_keytab",
+      "group": {
+        "access": "r",
+        "name": "${cluster-env/user_group}"
+      }
+    }
+  }],
+  "services": [{
+    "components": [{
+      "name": "MAHOUT"
+    }],
+    "identities": [{
+      "name": "/smokeuser"
+    }, {
+      "name": "/HDFS/hdfs"
+    }],
+    "name": "MAHOUT"
+  }, {
+    "components": [{
+      "identities": [{
+        "principal": {
+          "configuration": "mapred-site/mapreduce.jobhistory.principal",
+          "type": "service",
+          "local_username": "${mapred-env/mapred_user}",
+          "value": "jhs/_HOST@${realm}"
+        },
+        "name": "history_server_jhs",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${mapred-env/mapred_user}"
+          },
+          "file": "${keytab_dir}/jhs.service.keytab",
+          "configuration": "mapred-site/mapreduce.jobhistory.keytab",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }, {
+        "principal": {
+          "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal",
+          "type": "service",
+          "value": "HTTP/_HOST@${realm}"
+        },
+        "name": "/spnego",
+        "keytab": {
+          "owner": {},
+          "file": "${keytab_dir}/spnego.service.keytab",
+          "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file",
+          "group": {}
+        }
+      }],
+      "name": "HISTORYSERVER"
+    }],
+    "identities": [{
+      "name": "/spnego"
+    }, {
+      "name": "/HDFS/hdfs"
+    }, {
+      "name": "/smokeuser"
+    }],
+    "name": "MAPREDUCE2"
+  }, {
+    "components": [{
+      "identities": [{
+        "principal": {
+          "configuration": "oozie-site/oozie.service.HadoopAccessorService.kerberos.principal",
+          "type": "service",
+          "local_username": "${oozie-env/oozie_user}",
+          "value": "oozie/_HOST@${realm}"
+        },
+        "name": "oozie_server",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${oozie-env/oozie_user}"
+          },
+          "file": "${keytab_dir}/oozie.service.keytab",
+          "configuration": "oozie-site/oozie.service.HadoopAccessorService.keytab.file",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }, {
+        "principal": {
+          "configuration": "oozie-site/oozie.authentication.kerberos.principal",
+          "type": "service"
+        },
+        "name": "/spnego",
+        "keytab": {
+          "owner": {},
+          "configuration": "oozie-site/oozie.authentication.kerberos.keytab",
+          "group": {}
+        }
+      }],
+      "name": "OOZIE_SERVER"
+    }],
+    "identities": [{
+      "name": "/spnego"
+    }, {
+      "name": "/smokeuser"
+    }, {
+      "name": "/HDFS/hdfs"
+    }],
+    "auth_to_local_properties": [
+      "oozie-site/oozie.authentication.kerberos.name.rules"
+    ],
+    "configurations": [{
+      "oozie-site": {
+        "oozie.service.HadoopAccessorService.kerberos.enabled": "true",
+        "oozie.authentication.type": "kerberos",
+        "oozie.service.AuthorizationService.authorization.enabled": "true",
+        "local.realm": "${realm}",
+        "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials,hive2=org.apache.oozie.action.hadoop.Hive2Credentials"
+      }
+    }],
+    "name": "OOZIE"
+  }, {
+    "components": [{
+      "identities": [{
+        "principal": {
+          "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.principal",
+          "type": "service",
+          "local_username": "${hadoop-env/hdfs_user}",
+          "value": "nn/_HOST@${realm}"
+        },
+        "name": "secondary_namenode_nn",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${hadoop-env/hdfs_user}"
+          },
+          "file": "${keytab_dir}/nn.service.keytab",
+          "configuration": "hdfs-site/dfs.secondary.namenode.keytab.file",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }, {
+        "principal": {
+          "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.internal.spnego.principal",
+          "type": "service",
+          "value": "HTTP/_HOST@${realm}"
+        },
+        "name": "/spnego"
+      }],
+      "name": "SECONDARY_NAMENODE"
+    }, {
+      "identities": [{
+        "principal": {
+          "configuration": "hdfs-site/dfs.datanode.kerberos.principal",
+          "type": "service",
+          "local_username": "${hadoop-env/hdfs_user}",
+          "value": "dn/_HOST@${realm}"
+        },
+        "name": "datanode_dn",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${hadoop-env/hdfs_user}"
+          },
+          "file": "${keytab_dir}/dn.service.keytab",
+          "configuration": "hdfs-site/dfs.datanode.keytab.file",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }],
+      "configurations": [{
+        "hdfs-site": {
+          "dfs.datanode.address": "0.0.0.0:1019",
+          "dfs.datanode.http.address": "0.0.0.0:1022"
+        }
+      }],
+      "name": "DATANODE"
+    }, {
+      "identities": [{
+        "principal": {
+          "configuration": "hdfs-site/nfs.kerberos.principal",
+          "type": "service",
+          "local_username": "${hadoop-env/hdfs_user}",
+          "value": "nfs/_HOST@${realm}"
+        },
+        "name": "nfsgateway",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${hadoop-env/hdfs_user}"
+          },
+          "file": "${keytab_dir}/nfs.service.keytab",
+          "configuration": "hdfs-site/nfs.keytab.file",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }],
+      "name": "NFS_GATEWAY"
+    }, {
+      "identities": [{
+        "principal": {
+          "configuration": "hdfs-site/dfs.journalnode.kerberos.principal",
+          "type": "service",
+          "local_username": "${hadoop-env/hdfs_user}",
+          "value": "jn/_HOST@${realm}"
+        },
+        "name": "journalnode_jn",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${hadoop-env/hdfs_user}"
+          },
+          "file": "${keytab_dir}/jn.service.keytab",
+          "configuration": "hdfs-site/dfs.journalnode.keytab.file",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }, {
+        "principal": {
+          "configuration": "hdfs-site/dfs.journalnode.kerberos.internal.spnego.principal",
+          "type": "service",
+          "value": "HTTP/_HOST@${realm}"
+        },
+        "name": "/spnego"
+      }],
+      "name": "JOURNALNODE"
+    }, {
+      "identities": [{
+        "principal": {
+          "configuration": "hdfs-site/dfs.namenode.kerberos.principal",
+          "type": "service",
+          "local_username": "${hadoop-env/hdfs_user}",
+          "value": "nn/_HOST@${realm}"
+        },
+        "name": "namenode_nn",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${hadoop-env/hdfs_user}"
+          },
+          "file": "${keytab_dir}/nn.service.keytab",
+          "configuration": "hdfs-site/dfs.namenode.keytab.file",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }, {
+        "principal": {
+          "configuration": "hdfs-site/dfs.namenode.kerberos.internal.spnego.principal",
+          "type": "service",
+          "value": "HTTP/_HOST@${realm}"
+        },
+        "name": "/spnego"
+      }],
+      "configurations": [{
+        "hdfs-site": {
+          "dfs.block.access.token.enable": "true"
+        }
+      }],
+      "name": "NAMENODE"
+    }],
+    "identities": [{
+      "principal": {
+        "configuration": "hdfs-site/dfs.web.authentication.kerberos.principal",
+        "type": "service",
+        "value": "HTTP/_HOST@${realm}"
+      },
+      "name": "/spnego",
+      "keytab": {
+        "owner": {},
+        "file": "${keytab_dir}/spnego.service.keytab",
+        "configuration": "hdfs-site/dfs.web.authentication.kerberos.keytab",
+        "group": {}
+      }
+    }, {
+      "name": "/smokeuser"
+    }, {
+      "principal": {
+        "configuration": "hadoop-env/hdfs_principal_name",
+        "type": "user",
+        "local_username": "${hadoop-env/hdfs_user}",
+        "value": "${hadoop-env/hdfs_user}-${cluster_name|toLower()}@${realm}"
+      },
+      "name": "hdfs",
+      "keytab": {
+        "owner": {
+          "access": "r",
+          "name": "${hadoop-env/hdfs_user}"
+        },
+        "file": "${keytab_dir}/hdfs.headless.keytab",
+        "configuration": "hadoop-env/hdfs_user_keytab",
+        "group": {
+          "access": "r",
+          "name": "${cluster-env/user_group}"
+        }
+      }
+    }],
+    "auth_to_local_properties": [
+      "core-site/hadoop.security.auth_to_local"
+    ],
+    "configurations": [{
+      "core-site": {
+        "hadoop.security.authorization": "true",
+        "hadoop.security.authentication": "kerberos",
+        "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}"
+      }
+    }],
+    "name": "HDFS"
+  }, {
+    "components": [{
+      "configurations": [{
+        "tez-site": {
+          "tez.am.view-acls": ""
+        }
+      }],
+      "name": "TEZ_CLIENT"
+    }],
+    "name": "TEZ"
+  }, {
+    "components": [{
+      "name": "SPARK_CLIENT"
+    }, {
+      "name": "SPARK_JOBHISTORYSERVER"
+    }],
+    "identities": [{
+      "name": "/smokeuser"
+    }, {
+      "name": "/HDFS/hdfs"
+    }, {
+      "principal": {
+        "configuration": "spark-defaults/spark.history.kerberos.principal",
+        "type": "user",
+        "local_username": "${spark-env/spark_user}",
+        "value": "${spark-env/spark_user}-${cluster_name|toLower()}@${realm}"
+      },
+      "name": "sparkuser",
+      "keytab": {
+        "owner": {
+          "access": "r",
+          "name": "${spark-env/spark_user}"
+        },
+        "file": "${keytab_dir}/spark.headless.keytab",
+        "configuration": "spark-defaults/spark.history.kerberos.keytab",
+        "group": {
+          "access": "",
+          "name": "${cluster-env/user_group}"
+        }
+      }
+    }],
+    "configurations": [{
+      "spark-defaults": {
+        "spark.history.kerberos.enabled": "true"
+      }
+    }],
+    "name": "SPARK"
+  }, {
+    "components": [{
+      "name": "ACCUMULO_MASTER"
+    }, {
+      "name": "ACCUMULO_MONITOR"
+    }, {
+      "name": "ACCUMULO_CLIENT"
+    }, {
+      "name": "ACCUMULO_TRACER"
+    }, {
+      "name": "ACCUMULO_TSERVER"
+    }, {
+      "name": "ACCUMULO_GC"
+    }],
+    "identities": [{
+      "principal": {
+        "configuration": "accumulo-env/accumulo_principal_name",
+        "type": "user",
+        "local_username": "${accumulo-env/accumulo_user}",
+        "value": "${accumulo-env/accumulo_user}-${cluster_name|toLower()}@${realm}"
+      },
+      "name": "accumulo",
+      "keytab": {
+        "owner": {
+          "access": "r",
+          "name": "${accumulo-env/accumulo_user}"
+        },
+        "file": "${keytab_dir}/accumulo.headless.keytab",
+        "configuration": "accumulo-env/accumulo_user_keytab",
+        "group": {
+          "access": "r",
+          "name": "${cluster-env/user_group}"
+        }
+      }
+    }, {
+      "principal": {
+        "configuration": "accumulo-site/general.kerberos.principal",
+        "type": "service",
+        "local_username": "${accumulo-env/accumulo_user}",
+        "value": "${accumulo-env/accumulo_user}/_HOST@${realm}"
+      },
+      "name": "accumulo_service",
+      "keytab": {
+        "owner": {
+          "access": "r",
+          "name": "${accumulo-env/accumulo_user}"
+        },
+        "file": "${keytab_dir}/accumulo.service.keytab",
+        "configuration": "accumulo-site/general.kerberos.keytab",
+        "group": {
+          "access": "",
+          "name": "${cluster-env/user_group}"
+        }
+      }
+    }, {
+      "principal": {
+        "configuration": "accumulo-site/trace.user",
+        "type": "user",
+        "local_username": "${accumulo-env/accumulo_user}",
+        "value": "tracer-${cluster_name|toLower()}@${realm}"
+      },
+      "name": "accumulo_tracer",
+      "keytab": {
+        "owner": {
+          "access": "r",
+          "name": "${accumulo-env/accumulo_user}"
+        },
+        "file": "${keytab_dir}/accumulo-tracer.headless.keytab",
+        "configuration": "accumulo-site/trace.token.property.keytab",
+        "group": {
+          "access": "",
+          "name": "${cluster-env/user_group}"
+        }
+      }
+    }, {
+      "name": "/HDFS/hdfs"
+    }, {
+      "name": "/smokeuser"
+    }],
+    "configurations": [{
+      "accumulo-site": {
+        "instance.security.authenticator": "org.apache.accumulo.server.security.handler.KerberosAuthenticator",
+        "instance.rpc.sasl.enabled": "true",
+        "general.delegation.token.lifetime": "7d",
+        "trace.token.type": "org.apache.accumulo.core.client.security.tokens.KerberosToken",
+        "instance.security.permissionHandler": "org.apache.accumulo.server.security.handler.KerberosPermissionHandler",
+        "general.delegation.token.update.interval": "1d",
+        "instance.security.authorizor": "org.apache.accumulo.server.security.handler.KerberosAuthorizor"
+      }
+    }],
+    "name": "ACCUMULO"
+  }, {
+    "components": [{
+      "identities": [{
+        "principal": {
+          "configuration": "zookeeper-env/zookeeper_principal_name",
+          "type": "service",
+          "value": "zookeeper/_HOST@${realm}"
+        },
+        "name": "zookeeper_zk",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${zookeeper-env/zk_user}"
+          },
+          "file": "${keytab_dir}/zk.service.keytab",
+          "configuration": "zookeeper-env/zookeeper_keytab_path",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }],
+      "name": "ZOOKEEPER_SERVER"
+    }],
+    "identities": [{
+      "name": "/smokeuser"
+    }],
+    "name": "ZOOKEEPER"
+  }, {
+    "components": [{
+      "identities": [{
+        "principal": {
+          "configuration": "hbase-site/hbase.regionserver.kerberos.principal",
+          "type": "service",
+          "local_username": "${hbase-env/hbase_user}",
+          "value": "hbase/_HOST@${realm}"
+        },
+        "name": "hbase_regionserver_hbase",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${hbase-env/hbase_user}"
+          },
+          "file": "${keytab_dir}/hbase.service.keytab",
+          "configuration": "hbase-site/hbase.regionserver.keytab.file",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }],
+      "name": "HBASE_REGIONSERVER"
+    }, {
+      "identities": [{
+        "principal": {
+          "configuration": "hbase-site/hbase.master.kerberos.principal",
+          "type": "service",
+          "local_username": "${hbase-env/hbase_user}",
+          "value": "hbase/_HOST@${realm}"
+        },
+        "name": "hbase_master_hbase",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${hbase-env/hbase_user}"
+          },
+          "file": "${keytab_dir}/hbase.service.keytab",
+          "configuration": "hbase-site/hbase.master.keytab.file",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }],
+      "name": "HBASE_MASTER"
+    }, {
+      "identities": [{
+        "principal": {
+          "configuration": "hbase-site/phoenix.queryserver.kerberos.principal",
+          "type": "service",
+          "local_username": "${hbase-env/hbase_user}",
+          "value": "hbase/_HOST@${realm}"
+        },
+        "name": "hbase_queryserver_hbase",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${hbase-env/hbase_user}"
+          },
+          "file": "${keytab_dir}/hbase.service.keytab",
+          "configuration": "hbase-site/phoenix.queryserver.keytab.file",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }],
+      "name": "PHOENIX_QUERY_SERVER"
+    }],
+    "identities": [{
+      "name": "/spnego"
+    }, {
+      "name": "/HDFS/hdfs"
+    }, {
+      "principal": {
+        "configuration": "hbase-env/hbase_principal_name",
+        "type": "user",
+        "local_username": "${hbase-env/hbase_user}",
+        "value": "${hbase-env/hbase_user}-${cluster_name|toLower()}@${realm}"
+      },
+      "name": "hbase",
+      "keytab": {
+        "owner": {
+          "access": "r",
+          "name": "${hbase-env/hbase_user}"
+        },
+        "file": "${keytab_dir}/hbase.headless.keytab",
+        "configuration": "hbase-env/hbase_user_keytab",
+        "group": {
+          "access": "r",
+          "name": "${cluster-env/user_group}"
+        }
+      }
+    }, {
+      "name": "/smokeuser"
+    }],
+    "configurations": [{
+      "hbase-site": {
+        "hbase.coprocessor.master.classes": "{{hbase_coprocessor_master_classes}}",
+        "hbase.security.authentication": "kerberos",
+        "hbase.coprocessor.region.classes": "{{hbase_coprocessor_region_classes}}",
+        "hbase.security.authorization": "true",
+        "hbase.bulkload.staging.dir": "/apps/hbase/staging",
+        "zookeeper.znode.parent": "/hbase-secure"
+      }
+    }],
+    "name": "HBASE"
+  }, {
+    "components": [{
+      "name": "KERBEROS_CLIENT"
+    }],
+    "identities": [{
+      "name": "/smokeuser"
+    }],
+    "name": "KERBEROS"
+  }, {
+    "components": [{
+      "identities": [{
+        "principal": {
+          "configuration": "kms-site/hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal",
+          "type": "service"
+        },
+        "name": "/spnego",
+        "keytab": {
+          "owner": {},
+          "configuration": "kms-site/hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab",
+          "group": {}
+        }
+      }, {
+        "name": "/smokeuser"
+      }],
+      "name": "RANGER_KMS_SERVER"
+    }],
+    "identities": [{
+      "name": "/spnego",
+      "keytab": {
+        "owner": {},
+        "configuration": "kms-site/hadoop.kms.authentication.kerberos.keytab",
+        "group": {}
+      }
+    }, {
+      "name": "/smokeuser"
+    }],
+    "configurations": [{
+      "kms-site": {
+        "hadoop.kms.authentication.kerberos.principal": "*",
+        "hadoop.kms.authentication.type": "kerberos"
+      }
+    }],
+    "name": "RANGER_KMS"
+  }, {
+    "components": [{
+      "identities": [{
+        "principal": {
+          "configuration": "yarn-site/yarn.nodemanager.principal",
+          "type": "service",
+          "local_username": "${yarn-env/yarn_user}",
+          "value": "nm/_HOST@${realm}"
+        },
+        "name": "nodemanager_nm",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${yarn-env/yarn_user}"
+          },
+          "file": "${keytab_dir}/nm.service.keytab",
+          "configuration": "yarn-site/yarn.nodemanager.keytab",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }, {
+        "principal": {
+          "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal",
+          "type": "service",
+          "value": "HTTP/_HOST@${realm}"
+        },
+        "name": "/spnego",
+        "keytab": {
+          "owner": {},
+          "file": "${keytab_dir}/spnego.service.keytab",
+          "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file",
+          "group": {}
+        }
+      }],
+      "configurations": [{
+        "yarn-site": {
+          "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
+        }
+      }],
+      "name": "NODEMANAGER"
+    }, {
+      "identities": [{
+        "principal": {
+          "configuration": "yarn-site/yarn.timeline-service.principal",
+          "type": "service",
+          "local_username": "${yarn-env/yarn_user}",
+          "value": "yarn/_HOST@${realm}"
+        },
+        "name": "app_timeline_server_yarn",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${yarn-env/yarn_user}"
+          },
+          "file": "${keytab_dir}/yarn.service.keytab",
+          "configuration": "yarn-site/yarn.timeline-service.keytab",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }, {
+        "principal": {
+          "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal",
+          "type": "service",
+          "value": "HTTP/_HOST@${realm}"
+        },
+        "name": "/spnego",
+        "keytab": {
+          "owner": {},
+          "file": "${keytab_dir}/spnego.service.keytab",
+          "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab",
+          "group": {}
+        }
+      }],
+      "name": "APP_TIMELINE_SERVER"
+    }, {
+      "identities": [{
+        "principal": {
+          "configuration": "yarn-site/yarn.resourcemanager.principal",
+          "type": "service",
+          "local_username": "${yarn-env/yarn_user}",
+          "value": "rm/_HOST@${realm}"
+        },
+        "name": "resource_manager_rm",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${yarn-env/yarn_user}"
+          },
+          "file": "${keytab_dir}/rm.service.keytab",
+          "configuration": "yarn-site/yarn.resourcemanager.keytab",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }, {
+        "principal": {
+          "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal",
+          "type": "service",
+          "value": "HTTP/_HOST@${realm}"
+        },
+        "name": "/spnego",
+        "keytab": {
+          "owner": {},
+          "file": "${keytab_dir}/spnego.service.keytab",
+          "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file",
+          "group": {}
+        }
+      }],
+      "name": "RESOURCEMANAGER"
+    }],
+    "identities": [{
+      "name": "/spnego"
+    }, {
+      "name": "/HDFS/hdfs"
+    }, {
+      "name": "/smokeuser"
+    }],
+    "configurations": [{
+      "capacity-scheduler": {
+        "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
+        "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
+        "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
+        "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
+        "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
+      }
+    }, {
+      "yarn-site": {
+        "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
+        "yarn.resourcemanager.proxyusers.*.users": "",
+        "yarn.timeline-service.http-authentication.token.validity": "",
+        "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
+        "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
+        "yarn.timeline-service.http-authentication.cookie.path": "",
+        "yarn.timeline-service.http-authentication.type": "kerberos",
+        "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
+        "yarn.acl.enable": "true",
+        "yarn.timeline-service.http-authentication.signer.secret.provider": "",
+        "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+        "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+        "yarn.timeline-service.http-authentication.signature.secret": "",
+        "yarn.timeline-service.http-authentication.signature.secret.file": "",
+        "yarn.resourcemanager.proxyusers.*.hosts": "",
+        "yarn.resourcemanager.proxyusers.*.groups": "",
+        "yarn.timeline-service.enabled": "true",
+        "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+        "yarn.timeline-service.http-authentication.cookie.domain": ""
+      }
+    }, {
+      "core-site": {
+        "hadoop.proxyuser.yarn.groups": "*",
+        "hadoop.proxyuser.yarn.hosts": "${yarn-site/yarn.resourcemanager.hostname}"
+      }
+    }],
+    "name": "YARN"
+  }, {
+    "components": [{
+      "identities": [{
+        "principal": {
+          "configuration": "knox-env/knox_principal_name",
+          "type": "service",
+          "local_username": "${knox-env/knox_user}",
+          "value": "${knox-env/knox_user}/_HOST@${realm}"
+        },
+        "name": "knox_principal",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${knox-env/knox_user}"
+          },
+          "file": "${keytab_dir}/knox.service.keytab",
+          "configuration": "knox-env/knox_keytab_path",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }],
+      "configurations": [{
+        "oozie-site": {
+          "oozie.service.ProxyUserService.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
+          "oozie.service.ProxyUserService.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
+        }
+      }, {
+        "webhcat-site": {
+          "webhcat.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
+          "webhcat.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
+        }
+      }, {
+        "gateway-site": {
+          "gateway.hadoop.kerberos.secured": "true",
+          "java.security.krb5.conf": "/etc/krb5.conf"
+        }
+      }, {
+        "core-site": {
+          "hadoop.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}",
+          "hadoop.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}"
+        }
+      }],
+      "name": "KNOX_GATEWAY"
+    }],
+    "name": "KNOX"
+  }, {
+    "components": [{
+      "identities": [{
+        "principal": {
+          "configuration": "storm-env/storm_ui_principal_name",
+          "type": "service"
+        },
+        "name": "/spnego",
+        "keytab": {
+          "owner": {},
+          "configuration": "storm-env/storm_ui_keytab",
+          "group": {}
+        }
+      }],
+      "name": "STORM_UI_SERVER"
+    }, {
+      "name": "SUPERVISOR"
+    }, {
+      "identities": [{
+        "principal": {
+          "configuration": "storm-env/nimbus_principal_name",
+          "type": "service",
+          "value": "nimbus/_HOST@${realm}"
+        },
+        "name": "nimbus_server",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${storm-env/storm_user}"
+          },
+          "file": "${keytab_dir}/nimbus.service.keytab",
+          "configuration": "storm-env/nimbus_keytab",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }],
+      "name": "NIMBUS"
+    }, {
+      "identities": [{
+        "principal": {
+          "configuration": "storm-env/nimbus_principal_name",
+          "type": "service",
+          "value": "nimbus/_HOST@${realm}"
+        },
+        "name": "nimbus_server",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${storm-env/storm_user}"
+          },
+          "file": "${keytab_dir}/nimbus.service.keytab",
+          "configuration": "storm-env/nimbus_keytab",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }],
+      "name": "DRPC_SERVER"
+    }],
+    "identities": [{
+      "name": "/spnego"
+    }, {
+      "name": "/smokeuser"
+    }, {
+      "principal": {
+        "configuration": "storm-env/storm_principal_name",
+        "type": "user",
+        "value": "${storm-env/storm_user}-${cluster_name|toLower()}@${realm}"
+      },
+      "name": "storm_components",
+      "keytab": {
+        "owner": {
+          "access": "r",
+          "name": "${storm-env/storm_user}"
+        },
+        "file": "${keytab_dir}/storm.headless.keytab",
+        "configuration": "storm-env/storm_keytab",
+        "group": {
+          "access": "",
+          "name": "${cluster-env/user_group}"
+        }
+      }
+    }],
+    "configurations": [{
+      "storm-site": {
+        "nimbus.authorizer": "backtype.storm.security.auth.authorizer.SimpleACLAuthorizer",
+        "java.security.auth.login.config": "{{conf_dir}}/storm_jaas.conf",
+        "drpc.authorizer": "backtype.storm.security.auth.authorizer.DRPCSimpleACLAuthorizer",
+        "storm.principal.tolocal": "backtype.storm.security.auth.KerberosPrincipalToLocal",
+        "storm.zookeeper.superACL": "sasl:{{storm_bare_jaas_principal}}",
+        "ui.filter.params": "{'type': 'kerberos', 'kerberos.principal': '{{storm_ui_jaas_principal}}', 'kerberos.keytab': '{{storm_ui_keytab_path}}', 'kerberos.name.rules': 'DEFAULT'}",
+        "nimbus.supervisor.users": "['{{storm_bare_jaas_principal}}']",
+        "nimbus.admins": "['{{storm_bare_jaas_principal}}']",
+        "ui.filter": "org.apache.hadoop.security.authentication.server.AuthenticationFilter",
+        "supervisor.enable": "true"
+      }
+    }],
+    "name": "STORM"
+  }, {
+    "components": [{
+      "identities": [{
+        "principal": {
+          "configuration": "application-properties/atlas.authentication.principal",
+          "type": "service",
+          "local_username": "${atlas-env/metadata_user}",
+          "value": "atlas/_HOST@${realm}"
+        },
+        "name": "atlas",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${atlas-env/metadata_user}"
+          },
+          "file": "${keytab_dir}/atlas.service.keytab",
+          "configuration": "application-properties/atlas.authentication.keytab",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }, {
+        "principal": {
+          "configuration": "application-properties/atlas.http.authentication.kerberos.principal",
+          "type": "service",
+          "value": "HTTP/_HOST@${realm}"
+        },
+        "name": "/spnego",
+        "keytab": {
+          "owner": {},
+          "configuration": "application-properties/atlas.http.authentication.kerberos.keytab",
+          "group": {}
+        }
+      }],
+      "name": "ATLAS_SERVER"
+    }],
+    "auth_to_local_properties": [
+      "application-properties/atlas.http.authentication.kerberos.name.rules|new_lines_escaped"
+    ],
+    "configurations": [{
+      "application-properties": {
+        "atlas.authentication.method": "kerberos",
+        "atlas.http.authentication.enabled": "true",
+        "atlas.http.authentication.type": "kerberos"
+      }
+    }],
+    "name": "ATLAS"
+  }, {
+    "components": [{
+      "identities": [{
+        "principal": {
+          "configuration": "hive-site/hive.server2.authentication.kerberos.principal",
+          "type": "service",
+          "local_username": "${hive-env/hive_user}",
+          "value": "hive/_HOST@${realm}"
+        },
+        "name": "hive_server_hive",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${hive-env/hive_user}"
+          },
+          "file": "${keytab_dir}/hive.service.keytab",
+          "configuration": "hive-site/hive.server2.authentication.kerberos.keytab",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }, {
+        "principal": {
+          "configuration": "hive-site/hive.server2.authentication.spnego.principal",
+          "type": "service"
+        },
+        "name": "/spnego",
+        "keytab": {
+          "owner": {},
+          "configuration": "hive-site/hive.server2.authentication.spnego.keytab",
+          "group": {}
+        }
+      }],
+      "name": "HIVE_SERVER"
+    }, {
+      "identities": [{
+        "principal": {
+          "configuration": "hive-site/hive.metastore.kerberos.principal",
+          "type": "service",
+          "local_username": "${hive-env/hive_user}",
+          "value": "hive/_HOST@${realm}"
+        },
+        "name": "hive_metastore_hive",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${hive-env/hive_user}"
+          },
+          "file": "${keytab_dir}/hive.service.keytab",
+          "configuration": "hive-site/hive.metastore.kerberos.keytab.file",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }],
+      "name": "HIVE_METASTORE"
+    }, {
+      "identities": [{
+        "principal": {
+          "configuration": "webhcat-site/templeton.kerberos.principal",
+          "type": "service"
+        },
+        "name": "/spnego",
+        "keytab": {
+          "owner": {},
+          "configuration": "webhcat-site/templeton.kerberos.keytab",
+          "group": {}
+        }
+      }],
+      "name": "WEBHCAT_SERVER"
+    }],
+    "identities": [{
+      "name": "/spnego"
+    }, {
+      "name": "/smokeuser"
+    }],
+    "configurations": [{
+      "hive-site": {
+        "hive.metastore.sasl.enabled": "true",
+        "hive.server2.authentication": "KERBEROS"
+      }
+    }, {
+      "webhcat-site": {
+        "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=${clusterHostInfo/hive_metastore_host|each(thrift://%s:9083, \\\\,, \\s*\\,\\s*)},hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/_HOST@${realm}",
+        "templeton.kerberos.secret": "secret"
+      }
+    }, {
+      "core-site": {
+        "hadoop.proxyuser.HTTP.hosts": "${clusterHostInfo/webhcat_server_host}"
+      }
+    }],
+    "name": "HIVE"
+  }, {
+    "components": [{
+      "identities": [{
+        "principal": {
+          "configuration": "ams-hbase-security-site/hbase.master.kerberos.principal",
+          "type": "service",
+          "local_username": "${ams-env/ambari_metrics_user}",
+          "value": "amshbasemaster/_HOST@${realm}"
+        },
+        "name": "ams_hbase_master_hbase",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${ams-env/ambari_metrics_user}"
+          },
+          "file": "${keytab_dir}/ams-hbase.master.keytab",
+          "configuration": "ams-hbase-security-site/hbase.master.keytab.file",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }, {
+        "principal": {
+          "configuration": "ams-hbase-security-site/hbase.regionserver.kerberos.principal",
+          "type": "service",
+          "local_username": "${ams-env/ambari_metrics_user}",
+          "value": "amshbasers/_HOST@${realm}"
+        },
+        "name": "ams_hbase_regionserver_hbase",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${ams-env/ambari_metrics_user}"
+          },
+          "file": "${keytab_dir}/ams-hbase.regionserver.keytab",
+          "configuration": "ams-hbase-security-site/hbase.regionserver.keytab.file",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }, {
+        "principal": {
+          "configuration": "ams-hbase-security-site/hbase.myclient.principal",
+          "type": "service",
+          "local_username": "${ams-env/ambari_metrics_user}",
+          "value": "amshbase/_HOST@${realm}"
+        },
+        "name": "ams_collector",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${ams-env/ambari_metrics_user}"
+          },
+          "file": "${keytab_dir}/ams.collector.keytab",
+          "configuration": "ams-hbase-security-site/hbase.myclient.keytab",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }, {
+        "principal": {
+          "configuration": "ams-hbase-security-site/ams.zookeeper.principal",
+          "type": "service",
+          "local_username": "${ams-env/ambari_metrics_user}",
+          "value": "amszk/_HOST@${realm}"
+        },
+        "name": "ams_zookeeper",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${ams-env/ambari_metrics_user}"
+          },
+          "file": "${keytab_dir}/ams-zk.service.keytab",
+          "configuration": "ams-hbase-security-site/ams.zookeeper.keytab",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }],
+      "configurations": [{
+        "ams-hbase-security-site": {
+          "hbase.coprocessor.master.classes": "org.apache.hadoop.hbase.security.access.AccessController",
+          "hadoop.security.authentication": "kerberos",
+          "hbase.security.authentication": "kerberos",
+          "hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.AccessController",
+          "hbase.security.authorization": "true",
+          "hbase.zookeeper.property.kerberos.removeRealmFromPrincipal": "true",
+          "hbase.zookeeper.property.jaasLoginRenew": "3600000",
+          "hbase.zookeeper.property.authProvider.1": "org.apache.zookeeper.server.auth.SASLAuthenticationProvider",
+          "hbase.zookeeper.property.kerberos.removeHostFromPrincipal": "true"
+        }
+      },
+        {
+          "ams-hbase-site": {
+            "zookeeper.znode.parent": "/ams-hbase-secure"
+          }
+        }
+      ],
+      "name": "METRICS_COLLECTOR"
+    }],
+    "identities": [{
+      "name": "/spnego"
+    }],
+    "name": "AMBARI_METRICS"
+  }, {
+    "components": [{
+      "identities": [{
+        "principal": {
+          "configuration": "kafka-env/kafka_principal_name",
+          "type": "service",
+          "value": "${kafka-env/kafka_user}/_HOST@${realm}"
+        },
+        "name": "kafka_broker",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${kafka-env/kafka_user}"
+          },
+          "file": "${keytab_dir}/kafka.service.keytab",
+          "configuration": "kafka-env/kafka_keytab",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }],
+      "name": "KAFKA_BROKER"
+    }],
+    "identities": [{
+      "name": "/smokeuser"
+    }],
+    "configurations": [{
+      "kafka-broker": {
+        "principal.to.local.class": "kafka.security.auth.KerberosPrincipalToLocal",
+        "authorizer.class.name": "kafka.security.auth.SimpleAclAuthorizer",
+        "super.users": "user:${kafka-env/kafka_user}",
+        "security.inter.broker.protocol": "PLAINTEXTSASL"
+      }
+    }],
+    "name": "KAFKA"
+  }, {
+    "components": [{
+      "identities": [{
+        "principal": {
+          "configuration": "falcon-startup.properties/*.falcon.service.authentication.kerberos.principal",
+          "type": "service",
+          "local_username": "${falcon-env/falcon_user}",
+          "value": "falcon/_HOST@${realm}"
+        },
+        "name": "falcon_server",
+        "keytab": {
+          "owner": {
+            "access": "r",
+            "name": "${falcon-env/falcon_user}"
+          },
+          "file": "${keytab_dir}/falcon.service.keytab",
+          "configuration": "falcon-startup.properties/*.falcon.service.authentication.kerberos.keytab",
+          "group": {
+            "access": "",
+            "name": "${cluster-env/user_group}"
+          }
+        }
+      }, {
+        "principal": {
+          "configuration": "falcon-startup.properties/*.falcon.http.authentication.kerberos.principal",
+          "type": "service",
+          "value": "HTTP/_HOST@${realm}"
+        },
+        "name": "/spnego",
+        "keytab": {
+          "owner": {},
+          "configuration": "falcon-startup.properties/*.falcon.http.authentication.kerberos.keytab",
+          "group": {}
+        }
+      }],
+      "name": "FALCON_SERVER"
+    }],
+    "identities": [{
+      "name": "/spnego"
+    }, {
+      "name": "/smokeuser"
+    }, {
+      "name": "/HDFS/hdfs"
+    }],
+    "auth_to_local_properties": [
+      "falcon-startup.properties/*.falcon.http.authentication.kerberos.name.rules|new_lines_escaped"
+    ],
+    "configurations": [{
+      "falcon-startup.properties": {
+        "*.dfs.namenode.kerberos.principal": "nn/_HOST@${realm}",
+        "*.falcon.http.authentication.type": "kerberos",
+        "*.falcon.authentication.type": "kerberos"
+      }
+    }],
+    "name": "FALCON"
+  }],
+  "properties": {
+    "additional_realms": "",
+    "keytab_dir": "/etc/security/keytabs",
+    "realm": "EXAMPLE.COM"
+  }
+}
\ No newline at end of file
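
A note on the descriptor that ends above: values such as "${storm-env/storm_user}", "${cluster-env/user_group}" and "${realm}" are Ambari variable references, resolved against the cluster's configurations (and the descriptor's top-level "properties" block) before principals and keytab paths are materialized. Below is a minimal sketch of that substitution, assuming a flat dict of configurations; Ambari's real resolver also supports modifier functions such as |toLower() and |each(...), which this sketch deliberately leaves unmatched:

import re

def resolve(template, configurations, properties):
    # Replace ${config-type/property} from configurations, and bare ${name}
    # (e.g. ${realm}, ${keytab_dir}) from the descriptor's properties block.
    def lookup(match):
        ref = match.group(1)
        if '/' in ref:
            config_type, prop = ref.split('/', 1)
            return configurations.get(config_type, {}).get(prop, match.group(0))
        return properties.get(ref, match.group(0))
    # [^}|] skips references that carry modifier functions like |toLower().
    return re.sub(r'\$\{([^}|]+)\}', lookup, template)

configs = {'storm-env': {'storm_user': 'storm'}}
props = {'realm': 'EXAMPLE.COM', 'keytab_dir': '/etc/security/keytabs'}
print(resolve('${storm-env/storm_user}@${realm}', configs, props))    # storm@EXAMPLE.COM
print(resolve('${keytab_dir}/storm.headless.keytab', configs, props))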

http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json b/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json
index 147c1c0..0a8f20b 100644
--- a/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json
+++ b/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json
@@ -2893,7 +2893,7 @@
             {
               "yarn-site" : {
                 "yarn.timeline-service.http-authentication.signer.secret.provider.object" : "",
-                "yarn.resourcemanager.proxyusers.*.users" : "",
+                "yarn.resourcemanager.proxyuser.*.users" : "",
                 "yarn.timeline-service.http-authentication.token.validity" : "",
                 "yarn.timeline-service.http-authentication.kerberos.name.rules" : "",
                 "yarn.timeline-service.http-authentication.cookie.path" : "",
@@ -2901,14 +2901,14 @@
                 "yarn.resourcemanager.proxy-user-privileges.enabled" : "true",
                 "yarn.acl.enable" : "true",
                 "yarn.timeline-service.http-authentication.signer.secret.provider" : "",
-                "yarn.timeline-service.http-authentication.proxyusers.*.groups" : "",
-                "yarn.timeline-service.http-authentication.proxyusers.*.hosts" : "",
+                "yarn.timeline-service.http-authentication.proxyuser.*.groups" : "",
+                "yarn.timeline-service.http-authentication.proxyuser.*.hosts" : "",
                 "yarn.timeline-service.http-authentication.signature.secret" : "",
                 "yarn.timeline-service.http-authentication.signature.secret.file" : "",
-                "yarn.resourcemanager.proxyusers.*.hosts" : "",
-                "yarn.resourcemanager.proxyusers.*.groups" : "",
+                "yarn.resourcemanager.proxyuser.*.hosts" : "",
+                "yarn.resourcemanager.proxyuser.*.groups" : "",
                 "yarn.timeline-service.enabled" : "false",
-                "yarn.timeline-service.http-authentication.proxyusers.*.users" : "",
+                "yarn.timeline-service.http-authentication.proxyuser.*.users" : "",
                 "yarn.timeline-service.http-authentication.cookie.domain" : ""
               }
             }
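
The rename in the hunks above (proxyusers -> proxyuser) brings this test fixture in line with the singular spelling of the actual YARN property names. The wildcard segment stands in for a concrete proxy-user name; an assumed expansion, following the usual hadoop.proxyuser.<user>.<suffix> convention:

# Hypothetical proxy user; the fixture's '*' occupies this position.
user = 'ambari'
for prefix in ('yarn.resourcemanager.proxyuser',
               'yarn.timeline-service.http-authentication.proxyuser'):
    for suffix in ('hosts', 'groups', 'users'):
        print('%s.%s.%s' % (prefix, user, suffix))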


[02/50] [abbrv] ambari git commit: AMBARI-19568. Set up the correct authentication and authorization between ZooKeeper and Oozie. (Attila Magyar via stoader)

Posted by nc...@apache.org.
AMBARI-19568. Set up the correct authentication and authorization between ZooKeeper and Oozie. (Attila Magyar via stoader)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/aae7013f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/aae7013f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/aae7013f

Branch: refs/heads/branch-dev-patch-upgrade
Commit: aae7013f48983b093b31897997c9e725d8110f16
Parents: 285666f
Author: Attila Magyar <am...@hortonworks.com>
Authored: Tue Jan 17 09:18:15 2017 +0100
Committer: Toader, Sebastian <st...@hortonworks.com>
Committed: Tue Jan 17 09:18:15 2017 +0100

----------------------------------------------------------------------
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    |  6 +++++
 .../4.0.0.2.0/package/scripts/oozie_server.py   | 11 +++++++--
 .../4.0.0.2.0/package/scripts/params_linux.py   | 12 ++++++---
 .../package/templates/zkmigrator_jaas.conf.j2   | 26 ++++++++++++++++++++
 .../OOZIE/4.2.0.2.3/kerberos.json               |  3 ++-
 .../HDP/2.0.6/properties/stack_features.json    |  3 +--
 .../HDP/3.0/properties/stack_features.json      |  3 +--
 .../stacks/2.0.6/OOZIE/test_oozie_server.py     | 13 +++++++---
 .../HDF/2.0/properties/stack_features.json      |  3 +--
 9 files changed, 63 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/aae7013f/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
index 252f60e..3cdafe9 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
@@ -353,6 +353,12 @@ def oozie_server_specific():
     group = params.user_group,
     recursive_ownership = True,  
   )
+  if params.security_enabled:
+    File(os.path.join(params.conf_dir, 'zkmigrator_jaas.conf'),
+         owner=params.oozie_user,
+         group=params.user_group,
+         content=Template("zkmigrator_jaas.conf.j2")
+         )
 
 def __parse_sharelib_from_output(output):
   """

http://git-wip-us.apache.org/repos/asf/ambari/blob/aae7013f/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
index e0778da..1a34b87 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
@@ -42,7 +42,7 @@ from oozie_service import oozie_service
 from oozie_server_upgrade import OozieUpgrade
 
 from check_oozie_server_status import check_oozie_server_status
-
+from resource_management.core.resources.zkmigrator import ZkMigrator
 
 class OozieServer(Script):
 
@@ -193,7 +193,14 @@ class OozieServerDefault(OozieServer):
       stack_select.select("oozie-server", params.version)
 
     OozieUpgrade.prepare_libext_directory()
-    
+
+  def disable_security(self, env):
+    import params
+    if not params.zk_connection_string:
+      return
+    zkmigrator = ZkMigrator(params.zk_connection_string, params.java_exec, params.java64_home, params.jaas_file, params.oozie_user)
+    zkmigrator.set_acls(params.zk_namespace if params.zk_namespace.startswith('/') else '/' + params.zk_namespace, 'world:anyone:crdwa')
+
   def get_log_folder(self):
     import params
     return params.oozie_log_dir
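
The new disable_security hook above resets Oozie's ZooKeeper namespace to the open ACL string 'world:anyone:crdwa' (create, read, delete, write, admin for everyone) once Kerberos is turned off. A rough functional equivalent, sketched here with the kazoo client; judging by the java_exec and JAAS-file arguments it receives, Ambari's ZkMigrator resource drives a Java helper rather than doing this in-process:

from kazoo.client import KazooClient
from kazoo.security import make_acl

zk = KazooClient(hosts='zk-host1:2181,zk-host2:2181')  # hypothetical ensemble
zk.start()
# 'world:anyone:crdwa' as a structured ACL: every permission, for everyone.
open_acl = make_acl('world', 'anyone',
                    create=True, read=True, delete=True, write=True, admin=True)
zk.set_acls('/oozie', [open_acl])  # zk_namespace defaults to 'oozie' per params_linux.py
zk.stop()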

http://git-wip-us.apache.org/repos/asf/ambari/blob/aae7013f/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
index f9c608e..48c8ef0 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
@@ -138,6 +138,8 @@ oozie_pid_dir = status_params.oozie_pid_dir
 pid_file = status_params.pid_file
 hadoop_jar_location = "/usr/lib/hadoop/"
 java_share_dir = "/usr/share/java"
+java64_home = config['hostLevelParams']['java_home']
+java_exec = format("{java64_home}/bin/java")
 ext_js_file = "ext-2.2.zip"
 ext_js_path = format("/usr/share/{stack_name_uppercase}-oozie/{ext_js_file}")
 security_enabled = config['configurations']['cluster-env']['security_enabled']
@@ -157,9 +159,13 @@ oozie_site = config['configurations']['oozie-site']
 # Need this for yarn.nodemanager.recovery.dir in yarn-site
 yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
 yarn_resourcemanager_address = config['configurations']['yarn-site']['yarn.resourcemanager.address']
+zk_namespace = default('/configurations/oozie-site/oozie.zookeeper.namespace', 'oozie')
+zk_connection_string = default('/configurations/oozie-site/oozie.zookeeper.connection.string', None)
+jaas_file = os.path.join(conf_dir, 'zkmigrator_jaas.conf')
 
 if security_enabled:
   oozie_site = dict(config['configurations']['oozie-site'])
+  oozie_principal_with_host = oozie_principal.replace('_HOST', hostname)
 
   # If a user-supplied oozie.ha.authentication.kerberos.principal property exists in oozie-site,
   # use it to replace the existing oozie.authentication.kerberos.principal value. This is to ensure
@@ -176,10 +182,8 @@ if security_enabled:
 
   if stack_version_formatted and check_stack_feature(StackFeature.OOZIE_HOST_KERBEROS, stack_version_formatted):
     #older versions of oozie have problems when using _HOST in principal
-    oozie_site['oozie.service.HadoopAccessorService.kerberos.principal'] = \
-      oozie_principal.replace('_HOST', hostname)
-    oozie_site['oozie.authentication.kerberos.principal'] = \
-      http_principal.replace('_HOST', hostname)
+    oozie_site['oozie.service.HadoopAccessorService.kerberos.principal'] = oozie_principal_with_host
+    oozie_site['oozie.authentication.kerberos.principal'] = http_principal.replace('_HOST', hostname)
 
 smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 oozie_keytab = default("/configurations/oozie-env/oozie_keytab", oozie_service_keytab)

http://git-wip-us.apache.org/repos/asf/ambari/blob/aae7013f/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/templates/zkmigrator_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/templates/zkmigrator_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/templates/zkmigrator_jaas.conf.j2
new file mode 100644
index 0000000..fbc0ce5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/templates/zkmigrator_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useKeyTab=true
+  storeKey=true
+  useTicketCache=false
+  keyTab="{{oozie_keytab}}"
+  principal="{{oozie_principal_with_host}}";
+};
\ No newline at end of file
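
The rendered zkmigrator_jaas.conf is consumed the standard JAAS way: the migrator JVM is pointed at it via the -Djava.security.auth.login.config system property, and ZooKeeper's SASL client then reads the Client login context defined above. A sketch of such an invocation; the classpath and main class below are placeholders, since the real command line is assembled inside resource_management's ZkMigrator, which this diff does not show:

java_exec = '/usr/jdk64/jdk1.8.0_112/bin/java'      # i.e. {java64_home}/bin/java
jaas_file = '/etc/oozie/conf/zkmigrator_jaas.conf'  # params.jaas_file
cmd = [java_exec,
       '-Djava.security.auth.login.config=' + jaas_file,  # standard JVM/JAAS property
       '-cp', '<migrator-classpath>',                     # placeholder
       '<migrator-main-class>']                           # placeholder
print(' '.join(cmd))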

http://git-wip-us.apache.org/repos/asf/ambari/blob/aae7013f/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.2.3/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.2.3/kerberos.json b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.2.3/kerberos.json
index d2e2ab8..f1092f5 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.2.3/kerberos.json
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.2.0.2.3/kerberos.json
@@ -20,7 +20,8 @@
             "oozie.service.AuthorizationService.authorization.enabled": "true",
             "oozie.service.HadoopAccessorService.kerberos.enabled": "true",
             "local.realm": "${realm}",
-            "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials,hive2=org.apache.oozie.action.hadoop.Hive2Credentials"
+            "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials,hive2=org.apache.oozie.action.hadoop.Hive2Credentials",
+            "oozie.zookeeper.secure" : "true"
           }
         }
       ],

http://git-wip-us.apache.org/repos/asf/ambari/blob/aae7013f/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
index fd7fac9..a64af73 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
@@ -162,8 +162,7 @@
     {
       "name": "oozie_host_kerberos",
       "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
-      "min_version": "2.0.0.0",
-      "max_version": "2.2.0.0"
+      "min_version": "2.0.0.0"
     },
     {
       "name": "falcon_extensions",

http://git-wip-us.apache.org/repos/asf/ambari/blob/aae7013f/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
index dd87b72..ddf8348 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/properties/stack_features.json
@@ -162,8 +162,7 @@
     {
       "name": "oozie_host_kerberos",
       "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
-      "min_version": "2.0.0.0",
-      "max_version": "2.2.0.0"
+      "min_version": "2.0.0.0"
     },
     {
       "name": "falcon_extensions",

http://git-wip-us.apache.org/repos/asf/ambari/blob/aae7013f/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
index d24d0b9..f5bd4aa 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
@@ -601,7 +601,7 @@ class TestOozieServer(RMFTestCase):
     self.assertNoMoreResources()
 
   @patch.object(shell, "call")
-  @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, True]))
+  @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, True, True, True]))
   def test_configure_secured(self, call_mocks):
     call_mocks = MagicMock(return_value=(0, "New Oozie WAR file with added"))
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
@@ -616,7 +616,7 @@ class TestOozieServer(RMFTestCase):
     self.assertNoMoreResources()
 
   @patch.object(shell, "call")
-  @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, True]))
+  @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, True, True, True]))
   def test_configure_secured_ha(self, call_mocks):
     call_mocks = MagicMock(return_value=(0, "New Oozie WAR file with added"))
 
@@ -648,7 +648,7 @@ class TestOozieServer(RMFTestCase):
 
   @patch.object(shell, "call")
   @patch("os.path.isfile")
-  @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, True]))
+  @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, True, True, True]))
   def test_start_secured(self, isfile_mock, call_mocks):
     isfile_mock.return_value = True
     call_mocks = MagicMock(return_value=(0, "New Oozie WAR file with added"))
@@ -1125,9 +1125,14 @@ class TestOozieServer(RMFTestCase):
                               group = 'hadoop',
                               recursive_ownership = True,
     )
+    self.assertResourceCalled('File', '/etc/oozie/conf/zkmigrator_jaas.conf',
+                              owner = 'oozie',
+                              group = 'hadoop',
+                              content = Template('zkmigrator_jaas.conf.j2')
+                              )
 
   @patch.object(shell, "call")
-  @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, True]))
+  @patch('os.path.exists', new=MagicMock(side_effect = [False, True, False, True, True]))
   def test_configure_default_hdp22(self, call_mocks):
     call_mocks = MagicMock(return_value=(0, "New Oozie WAR file with added"))
     config_file = "stacks/2.0.6/configs/default.json"

http://git-wip-us.apache.org/repos/asf/ambari/blob/aae7013f/contrib/management-packs/hdf-ambari-mpack/src/main/resources/stacks/HDF/2.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/contrib/management-packs/hdf-ambari-mpack/src/main/resources/stacks/HDF/2.0/properties/stack_features.json b/contrib/management-packs/hdf-ambari-mpack/src/main/resources/stacks/HDF/2.0/properties/stack_features.json
index 645e357..0b6b3ab 100644
--- a/contrib/management-packs/hdf-ambari-mpack/src/main/resources/stacks/HDF/2.0/properties/stack_features.json
+++ b/contrib/management-packs/hdf-ambari-mpack/src/main/resources/stacks/HDF/2.0/properties/stack_features.json
@@ -162,8 +162,7 @@
     {
       "name": "oozie_host_kerberos",
       "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
-      "min_version": "0.0.0.0",
-      "max_version": "0.2.0.0"
+      "min_version": "0.0.0.0"
     },
     {
       "name": "falcon_extensions",


[03/50] [abbrv] ambari git commit: AMBARI-19562. Save is not present for coordinator and bundle. (Padma Priya N via gauravn7)

Posted by nc...@apache.org.
AMBARI-19562. Save is not present for coordinator and bundle. (Padma Priya N via gauravn7)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/684c9e64
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/684c9e64
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/684c9e64

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 684c9e64641c71e1923315e743c236afb5a328fd
Parents: aae7013
Author: Gaurav Nagar <gr...@gmail.com>
Authored: Tue Jan 17 14:53:11 2017 +0530
Committer: Gaurav Nagar <gr...@gmail.com>
Committed: Tue Jan 17 14:53:58 2017 +0530

----------------------------------------------------------------------
 .../ui/app/components/bundle-config.js          | 15 +++++
 .../resources/ui/app/components/coord-config.js | 16 ++++++
 .../ui/app/components/flow-designer.js          | 28 ++--------
 .../main/resources/ui/app/components/save-wf.js | 41 +++++++-------
 .../ui/app/domain/actionjob_hanlder.js          |  7 ++-
 .../app/templates/components/bundle-config.hbs  | 16 ++++--
 .../app/templates/components/coord-config.hbs   | 42 ++++++++++----
 .../app/templates/components/flow-designer.hbs  | 59 ++++----------------
 .../ui/app/templates/components/save-wf.hbs     |  2 +-
 9 files changed, 117 insertions(+), 109 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/684c9e64/contrib/views/wfmanager/src/main/resources/ui/app/components/bundle-config.js
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/bundle-config.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/bundle-config.js
index c6a24b8..2e0dadb 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/bundle-config.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/bundle-config.js
@@ -344,6 +344,21 @@ export default Ember.Component.extend(Ember.Evented, Validations, {
     },
     showVersionSettings(value){
       this.set('showVersionSettings', value);
+    },
+    save(){
+      var isDraft = false, bundleXml;
+      if(this.get('validations.isInvalid')) {
+       isDraft = true;
+      }else{
+        var bundleGenerator = BundleGenerator.create({bundle:this.get("bundle")});
+        bundleXml = bundleGenerator.process();
+      }
+      var bundleJson = JSON.stringify(this.get("bundle"));
+      this.set("configForSave",{json:bundleJson, xml:bundleXml, isDraft: isDraft});
+      this.set("showingSaveWorkflow", true);
+    },
+    closeSave(){
+      this.set("showingSaveWorkflow", false);
     }
   }
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/684c9e64/contrib/views/wfmanager/src/main/resources/ui/app/components/coord-config.js
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/coord-config.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/coord-config.js
index 743a163..eb0d585 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/coord-config.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/coord-config.js
@@ -578,6 +578,22 @@ export default Ember.Component.extend(Validations, Ember.Evented, {
     },
     showVersionSettings(value){
       this.set('showVersionSettings', value);
+    },
+    save(){
+      var isDraft = false, coordinatorXml;
+      var isChildComponentsValid = this.validateChildComponents();
+      if(this.get('validations.isInvalid') || !isChildComponentsValid) {
+       isDraft = true;
+      }else{
+        var coordGenerator = CoordinatorGenerator.create({coordinator:this.get("coordinator")});
+        coordinatorXml = coordGenerator.process();
+      }
+      var coordinatorJson = JSON.stringify(this.get("coordinator"));
+      this.set("configForSave",{json:coordinatorJson, xml:coordinatorXml,isDraft: isDraft});
+      this.set("showingSaveWorkflow", true);
+    },
+    closeSave(){
+      this.set("showingSaveWorkflow", false);
     }
   }
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/684c9e64/contrib/views/wfmanager/src/main/resources/ui/app/components/flow-designer.js
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/flow-designer.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/flow-designer.js
index 9007838..d0c05d6 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/flow-designer.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/flow-designer.js
@@ -610,21 +610,11 @@ export default Ember.Component.extend(FindNodeMixin, Validations, {
     }, 1000);
   },
   openSaveWorkflow() {
-    this.get('workflowContext').clearErrors();
-    var workflowGenerator=WorkflowGenerator.create({workflow:this.get("workflow"),
-    workflowContext:this.get('workflowContext')});
-    var workflowXml=workflowGenerator.process();
-    if(this.get('workflowContext').hasErrors()){
-      this.set('errors',this.get('workflowContext').getErrors());
-      this.set("jobXmlJSONStr", this.getWorkflowAsJson());
-      this.set("isDraft", true);
-    }else{
-      this.set("jobXmlJSONStr", this.getWorkflowAsJson());
-      var dynamicProperties = this.get('propertyExtractor').getDynamicProperties(workflowXml);
-      var configForSubmit={props:dynamicProperties,xml:workflowXml,params:this.get('workflow.parameters')};
-      this.set("workflowSubmitConfigs",configForSubmit);
-      this.set("isDraft", false);
-    }
+    var workflowGenerator = WorkflowGenerator.create({workflow:this.get("workflow"), workflowContext:this.get('workflowContext')});
+    var workflowXml = workflowGenerator.process();
+    var workflowJson = this.getWorkflowAsJson();
+    var isDraft = this.get('workflowContext').hasErrors()? true: false;
+    this.set("configForSave", {json : workflowJson, xml : workflowXml,isDraft : isDraft});
     this.set("showingSaveWorkflow",true);
   },
   openJobConfig (){
@@ -818,9 +808,6 @@ export default Ember.Component.extend(FindNodeMixin, Validations, {
     openEditor(node){
       this.openWorkflowEditor(node);
     },
-    setFilePath(filePath){
-      this.set("workflowFilePath", filePath);
-    },
     showNotification(node){
       this.set("showNotificationPanel", true);
       if(node.actionType){
@@ -862,10 +849,6 @@ export default Ember.Component.extend(FindNodeMixin, Validations, {
     },
     saveWorkflow(action){
       this.openSaveWorkflow();
-      if(action === "saveDraft"){
-        this.set("isDraft", true);
-      }
-      this.set('dryrun', false);
     },
     previewWorkflow(){
       this.set("showingPreview",false);
@@ -896,7 +879,6 @@ export default Ember.Component.extend(FindNodeMixin, Validations, {
     },
     closeWorkflowSubmitConfigs(){
       this.set("showingWorkflowConfigProps",false);
-      this.set("showingSaveWorkflow",false);
     },
     closeSaveWorkflow(){
       this.set("showingSaveWorkflow",false);

http://git-wip-us.apache.org/repos/asf/ambari/blob/684c9e64/contrib/views/wfmanager/src/main/resources/ui/app/components/save-wf.js
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/components/save-wf.js b/contrib/views/wfmanager/src/main/resources/ui/app/components/save-wf.js
index d91c52a..c6c0421 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/components/save-wf.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/components/save-wf.js
@@ -16,7 +16,6 @@
 */
 
 import Ember from 'ember';
-import Constants from '../utils/constants';
 import { validator, buildValidations } from 'ember-cp-validations';
 
 const Validations = buildValidations({
@@ -25,10 +24,8 @@ const Validations = buildValidations({
   })
 });
 
-
 export default Ember.Component.extend(Validations, {
   showingFileBrowser : false,
-  jobXml : "",
   overwritePath : false,
   savingInProgress : false,
   isStackTraceVisible: false,
@@ -36,7 +33,6 @@ export default Ember.Component.extend(Validations, {
   alertType : "",
   alertMessage : "",
   alertDetails : "",
-  filePath : "",
   showErrorMessage: false,
   saveJobService : Ember.inject.service('save-job'),
   displayName : Ember.computed('type', function(){
@@ -48,15 +44,23 @@ export default Ember.Component.extend(Validations, {
       return "Bundle";
     }
   }),
-  initialize :function(){
-    this.set("jobXml", this.get("jobConfigs").xml);
-    this.set('filePath', Ember.copy(this.get('jobFilePath')));
+  jobXml : Ember.computed('jobConfigs', function(){
+    return this.get('jobConfigs.xml');
+  }),
+  jobJson : Ember.computed('jobConfigs', function(){
+    return this.get('jobConfigs.json');
+  }),
+  filePath : Ember.computed.oneWay('jobFilePath',function(){
+    return Ember.copy(this.get('jobFilePath'));
+  }),
+  initialize : function(){
+    this.set('overwritePath', true);
   }.on('init'),
   rendered : function(){
     this.$("#configureJob").on('hidden.bs.modal', function () {
-      this.sendAction('closeJobConfigs');
+      this.sendAction('close');
     }.bind(this));
-    this.$("#configureJob").modal("show");    
+    this.$("#configureJob").modal("show");
   }.on('didInsertElement'),
   showNotification(data){
     if (!data){
@@ -79,12 +83,10 @@ export default Ember.Component.extend(Validations, {
   },
   saveJob(){
     var url = Ember.ENV.API_URL + "/saveWorkflowDraft?app.path=" + this.get("filePath") + "&overwrite=" + this.get("overwritePath");
-    var workflowData = this.get("jobXmlJSONStr");
-    this.saveWfJob(url, workflowData);
+    this.saveWfJob(url, this.get("jobJson"));
     if(!this.get('isDraft')){
        url = Ember.ENV.API_URL + "/saveWorkflow?app.path=" + this.get("filePath") + "&overwrite=" + this.get("overwritePath");
-       workflowData = this.get("jobXml");
-       this.saveWfJob(url, workflowData);
+       this.saveWfJob(url, this.get("jobXml"));
     }
   },
   saveWfJob(url, workflowData) {
@@ -95,7 +97,7 @@ export default Ember.Component.extend(Validations, {
           "message": "Workflow have been saved"
         });
         self.set("savingInProgress",false);
-        self.sendAction("saveFileinfo", this.get("filePath"), this.get("overwritePath"));
+        this.set('jobFilePath', this.get('filePath'));
     }.bind(this)).catch(function(response){
         console.log(response);
         self.set("savingInProgress",false);
@@ -106,7 +108,6 @@ export default Ember.Component.extend(Validations, {
           "details": self.getParsedErrorResponse(response),
           "stackTrace": self.getStackTrace(response.responseText)
         });
-        self.sendAction("saveFileinfo", self.get("filePath"), self.get("overwritePath"));
     });
   },
   getStackTrace(data){
@@ -157,11 +158,11 @@ export default Ember.Component.extend(Validations, {
       this.set("showingFileBrowser",false);
     },
     saveWorkflow(){
-		if(!this.get("validations.isInvalid")){
-	      this.sendAction("setFilePath", this.get("filePath"));
-	      this.set('showErrorMessage', true);
-	      this.saveJob();
-		}
+  		if(this.get('validations.isInvalid')){
+  	    this.set('showErrorMessage', true);
+  	    return;
+  		}
+      this.saveJob();
     },
     closePreview(){
       this.set("showingPreview",false);

http://git-wip-us.apache.org/repos/asf/ambari/blob/684c9e64/contrib/views/wfmanager/src/main/resources/ui/app/domain/actionjob_hanlder.js
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/domain/actionjob_hanlder.js b/contrib/views/wfmanager/src/main/resources/ui/app/domain/actionjob_hanlder.js
index 0bb2fb8..4cc89ef 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/domain/actionjob_hanlder.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/domain/actionjob_hanlder.js
@@ -343,7 +343,12 @@ var FSActionJobHandler=ActionJobHandler.extend({
   init(){
     this.mapping=[
       {xml:"name-node",domain:"nameNode"},
-      {xml:"configuration",customHandler:this.configurationMapper}
+      {xml:"configuration", customHandler:this.configurationMapper},
+      {xml:"delete"},
+      {xml:"mkdir"},
+      {xml:"move"},
+      {xml:"touchz"},
+      {xml:"chgrp"}
     ];
   },
   handle(nodeDomain,nodeObj,nodeName){

http://git-wip-us.apache.org/repos/asf/ambari/blob/684c9e64/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/bundle-config.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/bundle-config.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/bundle-config.hbs
index 249877b..8b42447 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/bundle-config.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/bundle-config.hbs
@@ -39,6 +39,11 @@
               </a>
             </li>
             <li>
+              <a class="pointer" href="#" data-toggle="modal" data-target="#ConfirmDialog" title="Reset Bundle" {{action "confirmReset"}}>
+                <i class="fa fa-refresh marginright5"></i>Reset
+              </a>
+            </li>
+            <li>
               <a class="pointer" href="#" data-toggle="modal" title="Preview Xml" {{action "preview"}}>
                   <i class="fa fa-eye marginright5"></i>Preview xml
                 </a>
@@ -46,17 +51,17 @@
             </ul>
           </div>
           <div class="btn-group" role="group" aria-label="...">
-            <button type="button" class="btn btn-default"  data-toggle="modal" data-target="#ConfirmDialog" title="New Workflow" {{action "confirmReset"}}>
-              <i class="fa fa-refresh"> Reset</i>
-            </button>
             <button type="button" class="btn btn-default" title="Bundle Versions" {{action "showVersionSettings" true}}>
               <i class="fa fa-cog marginright5"></i>Versions
             </button>
             <button  id="import-bundle-test" type="button" class="btn btn-default hide" title="Import Bundle Test" {{action "importBundleTest"}}>
               <i class="fa fa-download"></i>
             </button>
+            <button id="save-bundle" type="button" class="btn btn-default" title="Save coordinator in HDFS" {{action "save"}}>
+                <i class="fa fa-floppy-o"></i> Save
+            </button>
             <button type="button" class="btn btn-primary" title="Submit Bundle" {{action "submitBundle"}}>
-              <i class="fa fa-upload"> Submit</i>
+              <i class="fa fa-upload"></i> Submit
             </button>
           </div>
         </div>
@@ -132,3 +137,6 @@
 {{#if showVersionSettings}}
   {{bundle-version-settings bundle=bundle showVersionSettings="showVersionSettings" }}
 {{/if}}
+{{#if showingSaveWorkflow}}
+  {{save-wf type='wf' close="closeSave" jobFilePath=bundleFilePath openFileBrowser="openFileBrowser" closeFileBrowser="closeFileBrowser" jobConfigs=configForSave}}
+{{/if}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/684c9e64/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-config.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-config.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-config.hbs
index 7ee7ecd..f906fd5 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-config.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/coord-config.hbs
@@ -38,6 +38,11 @@
                   <i class="fa fa-download"> Import</i>
                 </a>
              </li>
+             <li>
+               <a class="pointer" href="#" data-toggle="modal" data-target="#ConfirmDialog" title="Reset Coordinator" {{action "confirmReset"}}>
+                 <i class="fa fa-refresh marginright5"></i>Reset
+               </a>
+             </li>
               <li>
                 <a class="pointer" href="#" data-toggle="modal" title="Preview Xml" {{action "preview"}}>
                   <i class="fa fa-eye marginright5"></i>Preview xml
@@ -45,21 +50,33 @@
               </li>
             </ul>
           </div>
+          <div class="dropdown inlineBlock">
+            <button class="btn btn-default dropdown-toggle borderRightRadiusNone" type="button" data-toggle="dropdown"><i class="fa fa-cog marginright5"></i>Settings
+            <span class="caret"></span></button>
+            <ul class="dropdown-menu">
+              <li>
+                <a  class="pointer" title="Import workflow" title="Parameters Configuration" {{action "showParameterSettings" true}}>
+                  <i class="fa fa-cog marginright5"></i>Parameters
+                </a>
+             </li>
+              <li>
+                <a class="pointer" href="#" data-toggle="modal" data-target="#control-dialog" title="Coordinator Controls" {{action "showControlConfig"}}>
+                  <i class="fa fa-wrench marginright5"></i>Controls
+                </a>
+              </li>
+              <li>
+                <a class="pointer" href="#" title="Coordinator Versions" {{action "showVersionSettings" true}}>
+                  <i class="fa fa-cog marginright5"></i>Versions
+                </a>
+              </li>
+            </ul>
+          </div>
           <div class="btn-group" role="group" aria-label="...">
-            <button type="button" class="btn btn-default"  data-toggle="modal" data-target="#ConfirmDialog" title="New Workflow" {{action "confirmReset"}}>
-              <i class="fa fa-refresh"> Reset</i>
-            </button>
             <button  id="import-test" type="button" class="btn btn-default hide" title="Import coordinator Test" {{action "importCoordinatorTest"}}>
               <i class="fa fa-download"></i>
             </button>
-            <button type="button" class="btn btn-default" title="Parameters Configuration" {{action "showParameterSettings" true}}>
-              <i class="fa fa-cog marginright5"></i>Parameters
-            </button>
-            <button type="button" class="btn btn-default"  data-toggle="modal" data-target="#control-dialog" title="Coordinator Controls" {{action "showControlConfig"}}>
-              <i class="fa fa-wrench marginright5"></i>Controls
-            </button>
-            <button type="button" class="btn btn-default" title="Coordinator Versions" {{action "showVersionSettings" true}}>
-              <i class="fa fa-cog marginright5"></i>Versions
+            <button id="save-coord" type="button" class="btn btn-default" title="Save coordinator in HDFS" {{action "save"}}>
+                <i class="fa fa-floppy-o"></i> Save
             </button>
             <button type="button" class="btn btn-primary" title="Submit Coordinator" {{action "submitCoordinator"}}>
               <i class="fa fa-upload marginright5"></i>Submit
@@ -360,3 +377,6 @@ okBtnText="Continue" cancelBtnText="Cancel" onOk="resetCoordinator"}}{{/confirma
   </div>
 </div>
 {{/if}}
+{{#if showingSaveWorkflow}}
+  {{save-wf type='wf' close="closeSave" jobFilePath=coordinatorFilePath openFileBrowser="openFileBrowser" closeFileBrowser="closeFileBrowser" jobConfigs=configForSave}}
+{{/if}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/684c9e64/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/flow-designer.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/flow-designer.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/flow-designer.hbs
index 01356b2..80af968 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/flow-designer.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/flow-designer.hbs
@@ -37,9 +37,6 @@
               <span class="caret"></span></button>
               <ul class="dropdown-menu">
                 <li>
-                  <!--a class="pointer" title="Save Draft" {{action "saveAsDraft"}}>
-                      <i class="fa fa-download marginright5"></i>Save As Draft
-                  </a-->
                   <a class="pointer" title="Import workflow from HDFS" {{action "showFileBrowser"}}>
                       <i class="fa fa-download marginright5"></i>Import from HDFS
                   </a>
@@ -48,11 +45,10 @@
                         <i class="fa fa-download marginright5"></i>Import from Local FS
                     {{/file-picker}}
                   </a>
-                  <a class="pointer" title="Save workflow" {{action "saveWorkflow" "save"}}>
-                      <i class="fa fa-floppy-o marginright5"></i>Save
-                  </a>
-                  <a class="pointer" title="Save as draft" {{action "saveWorkflow" "saveDraft"}}>
-                      <i class="fa fa-floppy-o marginright5"></i>Save As Draft
+                </li>
+                <li>
+                  <a  class="pointer" data-toggle="modal" data-target="#ConfirmDialog" title="Reset Workflow" {{action "conirmCreatingNewWorkflow"}}>
+                    <i class="fa fa-refresh marginright5"></i> Reset Workflow
                   </a>
                 </li>
                 <li>
@@ -80,26 +76,6 @@
             <i class="fa fa-download"></i>
         </button>
         <div class="btn-group">
-          <div class="dropdown">
-            <button class="btn btn-default dropdown-toggle" type="button" data-toggle="dropdown"><i class="fa fa-refresh marginright5"></i>Reset
-            <span class="caret"></span></button>
-            <ul class="dropdown-menu">
-              <li>
-                <a  class="pointer" data-toggle="modal" data-target="#ConfirmDialog" title="Reset Workflow" {{action "conirmCreatingNewWorkflow"}}>
-                  Reset Workflow
-                </a>
-              </li>
-              {{#if useCytoscape}}
-              <li>
-                <a class="pointer" title="Reset Layout" {{action "resetLayout"}}>
-                  Reset Layout
-                </a>
-              </li>
-              {{/if}}
-            </ul>
-          </div>
-        </div>
-        <div class="btn-group">
           <div class="btn-group">
             <div class="dropdown">
               <button class="btn btn-default dropdown-toggle borderRightRadiusNone" type="button" data-toggle="dropdown"><i class="fa fa-ban marginright5"></i>Kill Nodes
@@ -143,24 +119,9 @@
               </ul>
             </div>
           </div>
-          <!--div class="btn-group">
-            <div class="dropdown">
-              <button class="btn btn-default dropdown-toggle borderRadiusNone" type="button" data-toggle="dropdown"><i class="marginright5"></i>More
-              <span class="caret"></span></button>
-              <ul class="dropdown-menu">
-                <li>
-                  <a href="#" data-toggle="modal" data-target="#previewModal"  title="Preview workflow" {{action "previewWorkflow"}}>
-                    <i class="fa fa-eye marginright5"></i>Preview xml
-                  </a>
-                </li>
-                <li>
-                  <a href="javascript:void(0)" data-toggle="modal" title="Download workflow" {{action "downloadWorkflowXml"}}>
-                    <i class="fa fa-download marginright5"></i>Download xml
-                  </a>
-                </li>
-              </ul>
-            </div>
-          </div-->
+          <button id="import-workflow-test" type="button" class="btn btn-default" title="Save Workflow in HDFS" {{action "saveWorkflow" "save"}}>
+              <i class="fa fa-floppy-o"></i> Save
+          </button>
           <button type="button" class="btn btn-default" title="Validate workflow" {{action "dryRunWorkflow"}}>
               <i class="fa fa-play marginright5"></i>Validate
           </button>
@@ -291,10 +252,10 @@
                 <i class="fa fa-upload"></i>
             </span>
             <span class="overlay-hdfs-asset-import-icon" title="Import asset from HDFS" {{action "showActionSettingsFileBrowser"}}>
-                <i class="fa fa-cloud-download"></i>
+              <i class="fa fa-cloud-download"></i>
             </span>
             <span class="overlay-hdfs-asset-export-icon" title="Publish Asset to HDFS" {{action "showExportActionNodeFileBrowser"}}>
-                <i class="fa fa-cloud-upload"></i>
+              <i class="fa fa-cloud-upload"></i>
             </span>
           </div>
           {{decision-add-branch node=node registerAddBranchAction="registerAddBranchAction" addDecisionBranch="addDecisionBranch" workflow=workflow}}
@@ -318,7 +279,7 @@
   {{workflow-action-editor actionType=currentAction closeActionEditor="closeActionEditor" setNodeTransitions="setNodeTransitions" actionModel=currentNode.domain nodeType=currentNode.type currentNode=currentNode killNodes=workflow.killNodes credentials=workflow.credentials}}
 {{/if}}
 {{#if showingSaveWorkflow}}
-  {{save-wf type='wf' saveFileinfo="saveFileinfo" closeJobConfigs="closeWorkflowSubmitConfigs" jobFilePath=workflowFilePath openFileBrowser="openFileBrowser" closeFileBrowser="closeFileBrowser" jobConfigs=workflowSubmitConfigs setFilePath="setFilePath" isDryrun=dryrun isDraft=isDraft jobXmlJSONStr=jobXmlJSONStr}}
+  {{save-wf type='wf' close="closeSaveWorkflow" jobFilePath=workflowFilePath openFileBrowser="openFileBrowser" closeFileBrowser="closeFileBrowser" jobConfigs=configForSave}}
 {{/if}}
 {{#if showingWorkflowConfigProps}}
   {{job-config type='wf' closeJobConfigs="closeWorkflowSubmitConfigs" jobFilePath=workflowFilePath openFileBrowser="openFileBrowser" closeFileBrowser="closeFileBrowser" jobConfigs=workflowSubmitConfigs isDryrun=dryrun}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/684c9e64/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/save-wf.hbs
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/save-wf.hbs b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/save-wf.hbs
index 65bac42..913dc04 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/save-wf.hbs
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/templates/components/save-wf.hbs
@@ -69,7 +69,7 @@
         <span class="pull-left">Saving {{displayName}}</span>
         {{/if}}
         <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
-        <button type="button" class="btn btn-primary" {{action "saveWorkflow"}}>Submit</button>
+        <button type="button" class="btn btn-primary" {{action "saveWorkflow"}}>Save</button>
       </div>
     </div>
   </div>


[16/50] [abbrv] ambari git commit: AMBARI-19586 Ranger Admin HA Wizard should display config changes explicitly. (ababiichuk)

Posted by nc...@apache.org.
AMBARI-19586 Ranger Admin HA Wizard should display config changes explicitly. (ababiichuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/71c5b1f3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/71c5b1f3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/71c5b1f3

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 71c5b1f342aefe156d0af19264da0d2f0184ed8f
Parents: e3e9f70
Author: ababiichuk <ab...@hortonworks.com>
Authored: Tue Jan 17 15:53:05 2017 +0200
Committer: ababiichuk <ab...@hortonworks.com>
Committed: Tue Jan 17 17:00:09 2017 +0200

----------------------------------------------------------------------
 ambari-web/app/assets/test/tests.js             |   1 +
 .../rangerAdmin/step3_controller.js             |  43 ++++++-
 .../rangerAdmin/step4_controller.js             |   2 +-
 .../rangerAdmin/wizard_controller.js            |   2 +
 .../app/routes/ra_high_availability_routes.js   |   4 +
 .../highAvailability/rangerAdmin/step3.hbs      |   7 ++
 .../highAvailability/rangerAdmin/step3_view.js  |   6 +-
 .../rangerAdmin/step3_controller_test.js        | 114 +++++++++++++++++++
 8 files changed, 176 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/71c5b1f3/ambari-web/app/assets/test/tests.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/test/tests.js b/ambari-web/app/assets/test/tests.js
index 604d96b..e42c811 100644
--- a/ambari-web/app/assets/test/tests.js
+++ b/ambari-web/app/assets/test/tests.js
@@ -89,6 +89,7 @@ var files = [
   'test/controllers/main/admin/highAvailability/journalNode/step7_controller_test',
   'test/controllers/main/admin/highAvailability/journalNode/step8_controller_test',
   'test/controllers/main/admin/highAvailability/journalNode/wizard_controller_test',
+  'test/controllers/main/admin/highAvailability/rangerAdmin/step3_controller_test',
   'test/controllers/main/dashboard/config_history_controller_test',
   'test/controllers/main/charts/heatmap_test',
   'test/controllers/main/charts/heatmap_metrics/heatmap_metric_test',

http://git-wip-us.apache.org/repos/asf/ambari/blob/71c5b1f3/ambari-web/app/controllers/main/admin/highAvailability/rangerAdmin/step3_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/highAvailability/rangerAdmin/step3_controller.js b/ambari-web/app/controllers/main/admin/highAvailability/rangerAdmin/step3_controller.js
index 475cc52..ea77d24 100644
--- a/ambari-web/app/controllers/main/admin/highAvailability/rangerAdmin/step3_controller.js
+++ b/ambari-web/app/controllers/main/admin/highAvailability/rangerAdmin/step3_controller.js
@@ -19,6 +19,47 @@
 var App = require('app');
 
 App.RAHighAvailabilityWizardStep3Controller = Em.Controller.extend({
-  name: "rAHighAvailabilityWizardStep3Controller"
+  name: 'rAHighAvailabilityWizardStep3Controller',
+
+  isLoaded: false,
+
+  versionLoaded: true,
+
+  hideDependenciesInfoBar: true,
+
+  stepConfigs: [
+    App.ServiceConfig.create({
+      serviceName: 'MISC',
+      configCategories: [
+        App.ServiceConfigCategory.create({
+          name: 'RANGER',
+          displayName: App.format.role('RANGER', true)
+        })
+      ],
+      showConfig: true
+    })
+  ],
+
+  loadStep: function () {
+    var self = this;
+    App.get('router.mainController.isLoading').call(App.get('router.clusterController'), 'isConfigsPropertiesLoaded').done(function () {
+      var property = App.configsCollection.getConfigByName('policymgr_external_url', 'admin-properties'),
+        stepConfig = self.get('stepConfigs.firstObject');
+      stepConfig.set('configs', [
+        App.ServiceConfigProperty.create(property, {
+          category: 'RANGER',
+          value: self.get('content.loadBalancerURL')
+        })
+      ]);
+      self.setProperties({
+        isLoaded: true,
+        selectedService: stepConfig
+      });
+    });
+  },
+
+  updateConfigProperty: function () {
+    this.set('content.policymgrExternalURL', this.get('selectedService.configs.firstObject.value'));
+  }
 });
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/71c5b1f3/ambari-web/app/controllers/main/admin/highAvailability/rangerAdmin/step4_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/highAvailability/rangerAdmin/step4_controller.js b/ambari-web/app/controllers/main/admin/highAvailability/rangerAdmin/step4_controller.js
index b7818d9..3c32af9 100644
--- a/ambari-web/app/controllers/main/admin/highAvailability/rangerAdmin/step4_controller.js
+++ b/ambari-web/app/controllers/main/admin/highAvailability/rangerAdmin/step4_controller.js
@@ -63,7 +63,7 @@ App.RAHighAvailabilityWizardStep4Controller = App.HighAvailabilityProgressPageCo
   },
 
   onLoadConfigs: function (data) {
-    data.items.findProperty('type', 'admin-properties').properties['policymgr_external_url'] = this.get('content.loadBalancerURL');
+    data.items.findProperty('type', 'admin-properties').properties['policymgr_external_url'] = this.get('content.policymgrExternalURL');
     var configData = this.reconfigureSites(['admin-properties'], data, Em.I18n.t('admin.highAvailability.step4.save.configuration.note').format(App.format.role('RANGER_ADMIN', false)));
 
     App.ajax.send({

http://git-wip-us.apache.org/repos/asf/ambari/blob/71c5b1f3/ambari-web/app/controllers/main/admin/highAvailability/rangerAdmin/wizard_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/highAvailability/rangerAdmin/wizard_controller.js b/ambari-web/app/controllers/main/admin/highAvailability/rangerAdmin/wizard_controller.js
index 2bc6e37..fbb0692 100644
--- a/ambari-web/app/controllers/main/admin/highAvailability/rangerAdmin/wizard_controller.js
+++ b/ambari-web/app/controllers/main/admin/highAvailability/rangerAdmin/wizard_controller.js
@@ -36,6 +36,7 @@ App.RAHighAvailabilityWizardController = App.WizardController.extend({
     controllerName: 'rAHighAvailabilityWizardController',
     cluster: null,
     loadBalancerURL: null,
+    policymgrExternalURL: null,
     hosts: null,
     services: null,
     masterComponentHosts: null
@@ -100,6 +101,7 @@ App.RAHighAvailabilityWizardController = App.WizardController.extend({
           this.loadTasksStatuses();
           this.loadTasksRequestIds();
           this.loadRequestIds();
+          this.load('policymgrExternalURL');
         }
       }
     ]

http://git-wip-us.apache.org/repos/asf/ambari/blob/71c5b1f3/ambari-web/app/routes/ra_high_availability_routes.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/routes/ra_high_availability_routes.js b/ambari-web/app/routes/ra_high_availability_routes.js
index ac975ab..dbf653b 100644
--- a/ambari-web/app/routes/ra_high_availability_routes.js
+++ b/ambari-web/app/routes/ra_high_availability_routes.js
@@ -135,6 +135,10 @@ module.exports = App.WizardRoute.extend({
       });
     },
     next: function (router) {
+      var controller = router.get('rAHighAvailabilityWizardController'),
+        stepController = router.get('rAHighAvailabilityWizardStep3Controller');
+      stepController.updateConfigProperty();
+      controller.save('policymgrExternalURL');
       router.transitionTo('step4');
     },
     back: function (router) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/71c5b1f3/ambari-web/app/templates/main/admin/highAvailability/rangerAdmin/step3.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/highAvailability/rangerAdmin/step3.hbs b/ambari-web/app/templates/main/admin/highAvailability/rangerAdmin/step3.hbs
index 3c798c8..5342728 100644
--- a/ambari-web/app/templates/main/admin/highAvailability/rangerAdmin/step3.hbs
+++ b/ambari-web/app/templates/main/admin/highAvailability/rangerAdmin/step3.hbs
@@ -46,6 +46,13 @@
         <div class="alert alert-info">
           {{{t admin.rm_highAvailability.wizard.step3.configs_changes}}}
         </div>
+        {{#if isLoaded}}
+          <div id="serviceConfig">
+            {{view App.ServiceConfigView}}
+          </div>
+        {{else}}
+          {{view App.SpinnerView}}
+        {{/if}}
     </div>
   </div>
 </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/71c5b1f3/ambari-web/app/views/main/admin/highAvailability/rangerAdmin/step3_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/highAvailability/rangerAdmin/step3_view.js b/ambari-web/app/views/main/admin/highAvailability/rangerAdmin/step3_view.js
index 8aa0125..1c55608 100644
--- a/ambari-web/app/views/main/admin/highAvailability/rangerAdmin/step3_view.js
+++ b/ambari-web/app/views/main/admin/highAvailability/rangerAdmin/step3_view.js
@@ -21,6 +21,10 @@ var App = require('app');
 
 App.RAHighAvailabilityWizardStep3View = Em.View.extend({
 
-  templateName: require('templates/main/admin/highAvailability/rangerAdmin/step3')
+  templateName: require('templates/main/admin/highAvailability/rangerAdmin/step3'),
+
+  didInsertElement: function () {
+    this.get('controller').loadStep();
+  }
 
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/71c5b1f3/ambari-web/test/controllers/main/admin/highAvailability/rangerAdmin/step3_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/admin/highAvailability/rangerAdmin/step3_controller_test.js b/ambari-web/test/controllers/main/admin/highAvailability/rangerAdmin/step3_controller_test.js
new file mode 100644
index 0000000..649bcbc
--- /dev/null
+++ b/ambari-web/test/controllers/main/admin/highAvailability/rangerAdmin/step3_controller_test.js
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+var App = require('app');
+require('controllers/main/admin/highAvailability/rangerAdmin/step3_controller');
+require('controllers/main');
+
+describe('App.RAHighAvailabilityWizardStep3Controller', function () {
+
+  var controller;
+
+  beforeEach(function () {
+    controller = App.RAHighAvailabilityWizardStep3Controller.create();
+  });
+
+  describe('#loadStep', function () {
+
+    var dfd,
+      testCases = [
+        {
+          path: 'isLoaded',
+          result: true
+        },
+        {
+          path: 'selectedService.configs.length',
+          result: 1,
+          message: 'configs length'
+        },
+        {
+          path: 'selectedService.configs.firstObject.name',
+          result: 'policymgr_external_url',
+          message: 'property name'
+        },
+        {
+          path: 'selectedService.configs.firstObject.category',
+          result: 'RANGER',
+          message: 'config category'
+        },
+        {
+          path: 'selectedService.configs.firstObject.value',
+          result: 'http://localhost:1111',
+          message: 'property value'
+        }
+      ];
+
+    beforeEach(function () {
+      dfd = $.Deferred();
+      sinon.stub(App.get('router.mainController'), 'isLoading').returns(dfd);
+      sinon.stub(App.configsCollection, 'getConfigByName').returns({
+        name: 'policymgr_external_url'
+      });
+      controller.set('content', {
+        loadBalancerURL: 'http://localhost:1111'
+      });
+      controller.loadStep();
+      dfd.resolve();
+    });
+
+    afterEach(function () {
+      App.get('router.mainController.isLoading').restore();
+      App.configsCollection.getConfigByName.restore();
+    });
+
+    testCases.forEach(function (testCase) {
+
+      it(testCase.message || testCase.path, function () {
+        expect(controller.get(testCase.path)).to.equal(testCase.result);
+      });
+
+    });
+
+  });
+
+  describe('#updateConfigProperty', function () {
+
+    beforeEach(function () {
+      controller.setProperties({
+        content: {
+          policymgrExternalURL: 'http://localhost:1111'
+        },
+        selectedService: {
+          configs: [
+            {
+              value: 'http://localhost:2222'
+            }
+          ]
+        }
+      });
+      controller.updateConfigProperty();
+    });
+
+    it('should update content.policymgrExternalURL', function () {
+      expect(controller.get('content.policymgrExternalURL')).to.equal('http://localhost:2222');
+    });
+
+  });
+
+});
\ No newline at end of file

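Taken together, the pieces above wire up one data flow: step 3 seeds an editable config field with the load balancer URL collected earlier, the route's next handler persists whatever value the user left in the field, and step 4 writes that value back as policymgr_external_url. A minimal sketch of that flow (plain Python standing in for the Ember controllers; every name here is illustrative, not Ambari API):

    class Wizard(object):
        def __init__(self, load_balancer_url):
            self.content = {'loadBalancerURL': load_balancer_url,
                            'policymgrExternalURL': None}

    class Step3(object):
        def __init__(self, wizard):
            self.wizard = wizard
            self.configs = []

        def load_step(self):
            # Pre-fill the editor with the load balancer URL collected earlier.
            self.configs = [{'name': 'policymgr_external_url',
                             'value': self.wizard.content['loadBalancerURL']}]

        def update_config_property(self):
            # Called from the route's `next` handler before moving to step 4.
            self.wizard.content['policymgrExternalURL'] = self.configs[0]['value']

    wiz = Wizard('http://lb.example.com:6080')
    step3 = Step3(wiz)
    step3.load_step()
    step3.configs[0]['value'] = 'http://lb.example.com:6080/ranger'  # user edits the field
    step3.update_config_property()
    print(wiz.content['policymgrExternalURL'])  # -> http://lb.example.com:6080/ranger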

[13/50] [abbrv] ambari git commit: AMBARI-19044 Install & configure Ranger plugin components independently of Ranger admin components (mugdha)

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/ranger-kafka-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/ranger-kafka-audit.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/ranger-kafka-audit.xml
index b4c0790..5257549 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/ranger-kafka-audit.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/ranger-kafka-audit.xml
@@ -23,7 +23,7 @@
     <name>xasecure.audit.is.enabled</name>
     <value>true</value>
     <description>Is Audit enabled?</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db</name>
@@ -39,19 +39,19 @@
         <name>xasecure.audit.destination.db</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.jdbc.url</name>
     <value>{{audit_jdbc_url}}</value>
     <description>Audit DB JDBC URL</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.user</name>
     <value>{{xa_audit_db_user}}</value>
     <description>Audit DB JDBC User</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.password</name>
@@ -61,25 +61,25 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.jdbc.driver</name>
     <value>{{jdbc_driver}}</value>
     <description>Audit DB JDBC Driver</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.credential.provider.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>Credential file store</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.batch.filespool.dir</name>
     <value>/var/log/kafka/audit/db/spool</value>
     <description>/var/log/kafka/audit/db/spool</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs</name>
@@ -95,7 +95,7 @@
         <name>xasecure.audit.destination.hdfs</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs.dir</name>
@@ -107,13 +107,13 @@
         <name>xasecure.audit.destination.hdfs.dir</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
     <value>/var/log/kafka/audit/hdfs/spool</value>
     <description>/var/log/kafka/audit/hdfs/spool</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr</name>
@@ -129,7 +129,7 @@
         <name>xasecure.audit.destination.solr</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr.urls</name>
@@ -144,7 +144,7 @@
         <name>ranger.audit.solr.urls</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr.zookeepers</name>
@@ -156,13 +156,13 @@
         <name>ranger.audit.solr.zookeepers</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
     <value>/var/log/kafka/audit/solr/spool</value>
     <description>/var/log/kafka/audit/solr/spool</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.provider.summary.enabled</name>
@@ -172,6 +172,6 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

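A note on the attribute being flipped throughout these configuration files: on-ambari-upgrade controls whether a stack-default property is injected into existing cluster configs during an Ambari upgrade; add="true" inserts the property when it is missing, while add="false" leaves the cluster config untouched. A sketch of that merge rule (hypothetical helper, assuming the semantics implied by the attribute name; the authoritative behavior lives in Ambari's upgrade handling):

    def apply_on_ambari_upgrade(cluster_config, stack_defaults):
        # stack_defaults: name -> (default value, add flag from the XML)
        merged = dict(cluster_config)
        for name, (value, add_on_upgrade) in stack_defaults.items():
            if add_on_upgrade and name not in merged:
                merged[name] = value  # add="true": inject missing property
        return merged

    defaults = {'xasecure.audit.is.enabled': ('true', True),        # add="true"
                'xasecure.audit.destination.db': ('false', False)}  # add="false"
    print(apply_on_ambari_upgrade({}, defaults))
    # -> {'xasecure.audit.is.enabled': 'true'}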
http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/ranger-kafka-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/ranger-kafka-plugin-properties.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/ranger-kafka-plugin-properties.xml
index 3949402..7f594a0 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/ranger-kafka-plugin-properties.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/ranger-kafka-plugin-properties.xml
@@ -24,7 +24,7 @@
     <value>ambari-qa</value>
     <display-name>Policy user for KAFKA</display-name>
     <description>This user must be system user and also present at Ranger admin portal</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.rpc.protection</name>
@@ -33,7 +33,7 @@
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>common.name.for.certificate</name>
@@ -42,13 +42,13 @@
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>zookeeper.connect</name>
     <value>localhost:2181</value>
     <description>Used for repository creation on ranger admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger-kafka-plugin-enabled</name>
@@ -65,14 +65,14 @@
       <type>boolean</type>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>REPOSITORY_CONFIG_USERNAME</name>
     <value>kafka</value>
     <display-name>Ranger repository config user</display-name>
     <description>Used for repository creation on ranger admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>REPOSITORY_CONFIG_PASSWORD</name>
@@ -83,6 +83,6 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/ranger-kafka-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/ranger-kafka-policymgr-ssl.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/ranger-kafka-policymgr-ssl.xml
index cf4a82e..f0fc160 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/ranger-kafka-policymgr-ssl.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/ranger-kafka-policymgr-ssl.xml
@@ -23,7 +23,7 @@
     <name>xasecure.policymgr.clientssl.keystore</name>
     <value>kafkadev-clientcert.jks</value>
     <description>Java Keystore files</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.keystore.password</name>
@@ -33,13 +33,13 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore</name>
     <value>cacerts-xasecure.jks</value>
     <description>java truststore file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore.password</name>
@@ -49,18 +49,18 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
     <value>jceks://file/{{credential_file}}</value>
     <description>java keystore credential file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
     <value>jceks://file/{{credential_file}}</value>
     <description>java truststore credential file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/ranger-kafka-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/ranger-kafka-security.xml b/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/ranger-kafka-security.xml
index 91061d1..a9f84a4 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/ranger-kafka-security.xml
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.9.0/configuration/ranger-kafka-security.xml
@@ -23,36 +23,42 @@
     <name>ranger.plugin.kafka.service.name</name>
     <value>{{repo_name}}</value>
     <description>Name of the Ranger service containing policies for this Kafka instance</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.kafka.policy.source.impl</name>
     <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
     <description>Class to retrieve policies from the source</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.kafka.policy.rest.url</name>
     <value>{{policymgr_mgr_url}}</value>
     <description>URL to Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>policymgr_external_url</name>
+      </property>
+    </depends-on>
   </property>
   <property>
     <name>ranger.plugin.kafka.policy.rest.ssl.config.file</name>
     <value>/etc/kafka/conf/ranger-policymgr-ssl.xml</value>
     <description>Path to the file containing SSL details to contact Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.kafka.policy.pollIntervalMs</name>
     <value>30000</value>
     <description>How often to poll for changes in policies?</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.kafka.policy.cache.dir</name>
     <value>/etc/ranger/{{repo_name}}/policycache</value>
     <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml
index ae9314b..7f85667 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml
@@ -24,7 +24,7 @@
     <value>ambari-qa</value>
     <display-name>Policy user for KNOX</display-name>
     <description>This user must be system user and also present at Ranger admin portal</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>common.name.for.certificate</name>
@@ -33,7 +33,7 @@
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger-knox-plugin-enabled</name>
@@ -50,14 +50,14 @@
       <type>boolean</type>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>REPOSITORY_CONFIG_USERNAME</name>
     <value>admin</value>
     <display-name>Ranger repository config user</display-name>
     <description>Used for repository creation on ranger admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>REPOSITORY_CONFIG_PASSWORD</name>
@@ -68,14 +68,14 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>KNOX_HOME</name>
     <value>/usr/local/knox-server</value>
     <display-name>Knox Home</display-name>
     <description>Knox home folder</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>XAAUDIT.DB.IS_ENABLED</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
index dd5fc3a..9b61a5f 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
@@ -38,6 +38,7 @@ from resource_management.libraries.functions.stack_features import check_stack_f
 from resource_management.libraries.functions.stack_features import get_stack_feature_version
 from resource_management.libraries.functions.constants import StackFeature
 from resource_management.libraries.functions import is_empty
+from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs
 
 # server configurations
 config = Script.get_config()
@@ -260,82 +261,86 @@ if security_enabled:
   _hostname_lowercase = config['hostname'].lower()
   knox_principal_name = config['configurations']['knox-env']['knox_principal_name'].replace('_HOST',_hostname_lowercase)
 
+# for curl command in ranger plugin to get db connector
+jdk_location = config['hostLevelParams']['jdk_location']
+
+# ranger knox plugin start section
+
 # ranger host
 ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
 has_ranger_admin = not len(ranger_admin_hosts) == 0
-xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
 
+# xml_configurations_supported flag: derived from the RANGER_XML_CONFIGURATION stack feature instead of the ranger-env property
+xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
+
+# ambari-server hostname
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
 
-# ranger knox properties
-policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
-if 'admin-properties' in config['configurations'] and 'policymgr_external_url' in config['configurations']['admin-properties'] and policymgr_mgr_url.endswith('/'):
-  policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
-xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
-xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
-xa_db_host = config['configurations']['admin-properties']['db_host']
-repo_name = str(config['clusterName']) + '_knox'
-repo_name_value = config['configurations']['ranger-knox-security']['ranger.plugin.knox.service.name']
-if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
-  repo_name = repo_name_value
+# ranger knox plugin enabled property
+enable_ranger_knox = default("/configurations/ranger-knox-plugin-properties/ranger-knox-plugin-enabled", "No")
+enable_ranger_knox = True if enable_ranger_knox.lower() == 'yes' else False
+
+# get ranger knox properties if enable_ranger_knox is True
+if enable_ranger_knox:
+  # get ranger policy url
+  policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+  if xml_configurations_supported:
+    policymgr_mgr_url = config['configurations']['ranger-knox-security']['ranger.plugin.knox.policy.rest.url']
+
+  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
+    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+
+  # ranger audit db user
+  xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+
+  # ranger knox service/repository name
+  repo_name = str(config['clusterName']) + '_knox'
+  repo_name_value = config['configurations']['ranger-knox-security']['ranger.plugin.knox.service.name']
+  if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+    repo_name = repo_name_value
+
+  knox_home = config['configurations']['ranger-knox-plugin-properties']['KNOX_HOME']
+  common_name_for_certificate = config['configurations']['ranger-knox-plugin-properties']['common.name.for.certificate']
+  repo_config_username = config['configurations']['ranger-knox-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+
+  # ranger-env config
+  ranger_env = config['configurations']['ranger-env']
+
+  # create ranger-env config having external ranger credential properties
+  if not has_ranger_admin and enable_ranger_knox:
+    external_admin_username = default('/configurations/ranger-knox-plugin-properties/external_admin_username', 'admin')
+    external_admin_password = default('/configurations/ranger-knox-plugin-properties/external_admin_password', 'admin')
+    external_ranger_admin_username = default('/configurations/ranger-knox-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
+    external_ranger_admin_password = default('/configurations/ranger-knox-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
+    ranger_env = {}
+    ranger_env['admin_username'] = external_admin_username
+    ranger_env['admin_password'] = external_admin_password
+    ranger_env['ranger_admin_username'] = external_ranger_admin_username
+    ranger_env['ranger_admin_password'] = external_ranger_admin_password
+
+  ranger_plugin_properties = config['configurations']['ranger-knox-plugin-properties']
+  policy_user = config['configurations']['ranger-knox-plugin-properties']['policy_user']
+  repo_config_password = config['configurations']['ranger-knox-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
 
-knox_home = config['configurations']['ranger-knox-plugin-properties']['KNOX_HOME']
-common_name_for_certificate = config['configurations']['ranger-knox-plugin-properties']['common.name.for.certificate']
+  xa_audit_db_password = ''
+  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
+    xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
 
-repo_config_username = config['configurations']['ranger-knox-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+  downloaded_custom_connector = None
+  previous_jdbc_jar_name = None
+  driver_curl_source = None
+  driver_curl_target = None
+  previous_jdbc_jar = None
 
-ranger_env = config['configurations']['ranger-env']
-ranger_plugin_properties = config['configurations']['ranger-knox-plugin-properties']
-policy_user = config['configurations']['ranger-knox-plugin-properties']['policy_user']
+  if has_ranger_admin and stack_supports_ranger_audit_db:
+    xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
+    jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
 
-#For curl command in ranger plugin to get db connector
-jdk_location = config['hostLevelParams']['jdk_location']
-java_share_dir = '/usr/share/java'
-if has_ranger_admin:
-  enable_ranger_knox = (config['configurations']['ranger-knox-plugin-properties']['ranger-knox-plugin-enabled'].lower() == 'yes')
-  xa_audit_db_password = ''
-  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
-    xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
-  repo_config_password = unicode(config['configurations']['ranger-knox-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'])
-  xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
-  previous_jdbc_jar_name= None
-
-  if stack_supports_ranger_audit_db:
-    if xa_audit_db_flavor == 'mysql':
-      jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
-      jdbc_driver = "com.mysql.jdbc.Driver"
-    elif xa_audit_db_flavor == 'oracle':
-      jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
-      colon_count = xa_db_host.count(':')
-      if colon_count == 2 or colon_count == 0:
-        audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
-      else:
-        audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
-      jdbc_driver = "oracle.jdbc.OracleDriver"
-    elif xa_audit_db_flavor == 'postgres':
-      jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
-      jdbc_driver = "org.postgresql.Driver"
-    elif xa_audit_db_flavor == 'mssql':
-      jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
-      jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
-    elif xa_audit_db_flavor == 'sqla':
-      jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
-      jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
-
-  downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  driver_curl_target = format("{stack_root}/current/knox-server/ext/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  previous_jdbc_jar = format("{stack_root}/current/knox-server/ext/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  sql_connector_jar = ''
+    downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    driver_curl_target = format("{stack_root}/current/knox-server/ext/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    previous_jdbc_jar = format("{stack_root}/current/knox-server/ext/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+    sql_connector_jar = ''
 
   knox_ranger_plugin_config = {
     'username': repo_config_username,
@@ -368,21 +373,21 @@ if has_ranger_admin:
       'type': 'knox'
     }
 
-
-
   xa_audit_db_is_enabled = False
-  ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
   if xml_configurations_supported and stack_supports_ranger_audit_db:
     xa_audit_db_is_enabled = config['configurations']['ranger-knox-audit']['xasecure.audit.destination.db']
-  xa_audit_hdfs_is_enabled = config['configurations']['ranger-knox-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
-  ssl_keystore_password = unicode(config['configurations']['ranger-knox-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
-  ssl_truststore_password = unicode(config['configurations']['ranger-knox-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
-  credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
 
-  #For SQLA explicitly disable audit to DB for Ranger
-  if xa_audit_db_flavor == 'sqla':
+  xa_audit_hdfs_is_enabled = config['configurations']['ranger-knox-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else False
+  ssl_keystore_password = config['configurations']['ranger-knox-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
+  ssl_truststore_password = config['configurations']['ranger-knox-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
+
+  # for SQLA explicitly disable audit to DB for Ranger
+  if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor == 'sqla':
     xa_audit_db_is_enabled = False
 
+# ranger knox plugin end section
+
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] if has_namenode else None
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] if has_namenode else None
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None

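The params_linux.py rewrite above follows one pattern, repeated for each plugin: derive the enable flag from the plugin's own properties rather than from the presence of Ranger admin hosts, resolve Ranger parameters only when the flag is set, and substitute externally supplied admin credentials when no Ranger admin is installed in the cluster. A condensed sketch (hypothetical `default` helper mimicking Ambari's config lookup; not the real resource_management API):

    def default(config, path, fallback):
        # Walk a '/'-separated path through nested dicts, like Ambari's default().
        node = config
        for key in path.strip('/').split('/'):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    config = {
        'clusterHostInfo': {},  # no ranger_admin_hosts: external Ranger admin
        'configurations': {
            'ranger-knox-plugin-properties': {'ranger-knox-plugin-enabled': 'Yes'},
        },
    }

    has_ranger_admin = len(default(config, '/clusterHostInfo/ranger_admin_hosts', [])) > 0
    enable_ranger_knox = default(
        config, '/configurations/ranger-knox-plugin-properties/ranger-knox-plugin-enabled',
        'No').lower() == 'yes'

    if enable_ranger_knox:
        ranger_env = default(config, '/configurations/ranger-env', {})
        if not has_ranger_admin:
            # Plugin enabled against an external Ranger: take credentials from the
            # plugin's own properties instead of ranger-env.
            ranger_env = {
                'admin_username': default(config,
                    '/configurations/ranger-knox-plugin-properties/external_admin_username',
                    'admin'),
            }

    print(enable_ranger_knox, has_ranger_admin)  # True False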
http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py
index 7601dfa..67a1670 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py
@@ -25,8 +25,7 @@ from resource_management.libraries.functions.setup_ranger_plugin_xml import setu
 def setup_ranger_knox(upgrade_type=None):
   import params
 
-  if params.has_ranger_admin:
-
+  if params.enable_ranger_knox:
 
     stack_version = None
     if upgrade_type is not None:
@@ -105,4 +104,4 @@ def setup_ranger_knox(upgrade_type=None):
       Logger.info("Stack does not support core-site.xml creation for Ranger plugin, skipping core-site.xml configurations")
 
   else:
-    Logger.info('Ranger admin not installed')
+    Logger.info('Ranger Knox plugin is not enabled')

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/configuration/ranger-kms-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/configuration/ranger-kms-security.xml b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/configuration/ranger-kms-security.xml
index 95e653c..b0efb6d 100644
--- a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/configuration/ranger-kms-security.xml
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/configuration/ranger-kms-security.xml
@@ -36,6 +36,12 @@
     <value>{{policymgr_mgr_url}}</value>
     <description>URL to Ranger Admin</description>
     <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>policymgr_external_url</name>
+      </property>
+    </depends-on>
   </property>
   <property>
     <name>ranger.plugin.kms.policy.rest.ssl.config.file</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/ranger-storm-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/ranger-storm-audit.xml b/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/ranger-storm-audit.xml
index 4dc51eb..b7cf4c5 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/ranger-storm-audit.xml
+++ b/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/ranger-storm-audit.xml
@@ -23,7 +23,7 @@
     <name>xasecure.audit.is.enabled</name>
     <value>true</value>
     <description>Is Audit enabled?</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db</name>
@@ -39,19 +39,19 @@
         <name>xasecure.audit.destination.db</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.jdbc.url</name>
     <value>{{audit_jdbc_url}}</value>
     <description>Audit DB JDBC URL</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.user</name>
     <value>{{xa_audit_db_user}}</value>
     <description>Audit DB JDBC User</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.password</name>
@@ -61,25 +61,25 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.jdbc.driver</name>
     <value>{{jdbc_driver}}</value>
     <description>Audit DB JDBC Driver</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.credential.provider.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>Credential file store</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.db.batch.filespool.dir</name>
     <value>/var/log/storm/audit/db/spool</value>
     <description>/var/log/storm/audit/db/spool</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs</name>
@@ -95,7 +95,7 @@
         <name>xasecure.audit.destination.hdfs</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs.dir</name>
@@ -107,13 +107,13 @@
         <name>xasecure.audit.destination.hdfs.dir</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
     <value>/var/log/storm/audit/hdfs/spool</value>
     <description>/var/log/storm/audit/hdfs/spool</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr</name>
@@ -129,7 +129,7 @@
         <name>xasecure.audit.destination.solr</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr.urls</name>
@@ -144,7 +144,7 @@
         <name>ranger.audit.solr.urls</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr.zookeepers</name>
@@ -156,13 +156,13 @@
         <name>ranger.audit.solr.zookeepers</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
     <value>/var/log/storm/audit/solr/spool</value>
     <description>/var/log/storm/audit/solr/spool</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.audit.provider.summary.enabled</name>
@@ -172,6 +172,6 @@
     <value-attributes>
       <type>boolean</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/ranger-storm-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/ranger-storm-policymgr-ssl.xml b/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/ranger-storm-policymgr-ssl.xml
index b1f6e1e..9592914 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/ranger-storm-policymgr-ssl.xml
+++ b/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/ranger-storm-policymgr-ssl.xml
@@ -23,7 +23,7 @@
     <name>xasecure.policymgr.clientssl.keystore</name>
     <value>hadoopdev-clientcert.jks</value>
     <description>Java Keystore files</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.keystore.password</name>
@@ -33,13 +33,13 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore</name>
     <value>cacerts-xasecure.jks</value>
     <description>java truststore file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore.password</name>
@@ -49,18 +49,18 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>java keystore credential file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
     <value>jceks://file{{credential_file}}</value>
     <description>java truststore credential file</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/ranger-storm-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/ranger-storm-security.xml b/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/ranger-storm-security.xml
index 983702f..84e394b4 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/ranger-storm-security.xml
+++ b/ambari-server/src/main/resources/common-services/STORM/0.10.0/configuration/ranger-storm-security.xml
@@ -23,36 +23,42 @@
     <name>ranger.plugin.storm.service.name</name>
     <value>{{repo_name}}</value>
     <description>Name of the Ranger service containing policies for this Storm instance</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.storm.policy.source.impl</name>
     <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
     <description>Class to retrieve policies from the source</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.storm.policy.rest.url</name>
     <value>{{policymgr_mgr_url}}</value>
     <description>URL to Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
+    <depends-on>
+      <property>
+        <type>admin-properties</type>
+        <name>policymgr_external_url</name>
+      </property>
+    </depends-on>
   </property>
   <property>
     <name>ranger.plugin.storm.policy.rest.ssl.config.file</name>
     <value>/etc/storm/conf/ranger-policymgr-ssl.xml</value>
     <description>Path to the file containing SSL details to contact Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.storm.policy.pollIntervalMs</name>
     <value>30000</value>
     <description>How often to poll for changes in policies?</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger.plugin.storm.policy.cache.dir</name>
     <value>/etc/ranger/{{repo_name}}/policycache</value>
     <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
index dbb26f6..137f29a 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/params_linux.py
@@ -41,6 +41,7 @@ from resource_management.libraries.functions.expect import expect
 from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
 from resource_management.libraries.functions import is_empty
 from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
+from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs
 
 # server configurations
 config = Script.get_config()
@@ -225,34 +226,8 @@ if enable_atlas_hook:
     jar_jvm_opts += '-Datlas.conf=' + atlas_conf_dir
 #endregion
 
-
-# ranger host
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-
-#ranger storm properties
-policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
-if 'admin-properties' in config['configurations'] and 'policymgr_external_url' in config['configurations']['admin-properties'] and policymgr_mgr_url.endswith('/'):
-  policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
-xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
-xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
-xa_db_host = config['configurations']['admin-properties']['db_host']
-repo_name = str(config['clusterName']) + '_storm'
-repo_name_value = config['configurations']['ranger-storm-security']['ranger.plugin.storm.service.name']
-if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
-  repo_name = repo_name_value
-
-common_name_for_certificate = config['configurations']['ranger-storm-plugin-properties']['common.name.for.certificate']
-
 storm_ui_port = config['configurations']['storm-site']['ui.port']
 
-repo_config_username = config['configurations']['ranger-storm-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
-ranger_env = config['configurations']['ranger-env']
-ranger_plugin_properties = config['configurations']['ranger-storm-plugin-properties']
-policy_user = storm_user
-
 #Storm log4j properties
 storm_a1_maxfilesize = default('/configurations/storm-cluster-log4j/storm_a1_maxfilesize', 100)
 storm_a1_maxbackupindex = default('/configurations/storm-cluster-log4j/storm_a1_maxbackupindex', 9)
@@ -269,55 +244,87 @@ storm_worker_log4j_content = config['configurations']['storm-worker-log4j']['con
 # some commands may need to supply the JAAS location when running as storm
 storm_jaas_file = format("{conf_dir}/storm_jaas.conf")
 
-# For curl command in ranger plugin to get db connector
+# for curl command in ranger plugin to get db connector
 jdk_location = config['hostLevelParams']['jdk_location']
-java_share_dir = '/usr/share/java'
 
-if has_ranger_admin:
-  enable_ranger_storm = (config['configurations']['ranger-storm-plugin-properties']['ranger-storm-plugin-enabled'].lower() == 'yes')
+# ranger storm plugin start section
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+
+# ranger xml_configuration support flag; derived from the stack feature instead of the xml_configurations_supported property in ranger-env
+xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
+
+# ambari-server hostname
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+# ranger storm plugin enabled property
+enable_ranger_storm = default("/configurations/ranger-storm-plugin-properties/ranger-storm-plugin-enabled", "No")
+enable_ranger_storm = enable_ranger_storm.lower() == 'yes'
+
+# ranger storm properties
+if enable_ranger_storm:
+  # get ranger policy url
+  policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+  if xml_configurations_supported:
+    policymgr_mgr_url = config['configurations']['ranger-storm-security']['ranger.plugin.storm.policy.rest.url']
+
+  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
+    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+
+  # ranger audit db user
+  xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+
+  # ranger storm service name
+  repo_name = str(config['clusterName']) + '_storm'
+  repo_name_value = config['configurations']['ranger-storm-security']['ranger.plugin.storm.service.name']
+  if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+    repo_name = repo_name_value
+
+  common_name_for_certificate = config['configurations']['ranger-storm-plugin-properties']['common.name.for.certificate']
+  repo_config_username = config['configurations']['ranger-storm-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+
+  # ranger-env config
+  ranger_env = config['configurations']['ranger-env']
+
+  # build a ranger-env style config from the external Ranger credential properties
+  if not has_ranger_admin:
+    external_admin_username = default('/configurations/ranger-storm-plugin-properties/external_admin_username', 'admin')
+    external_admin_password = default('/configurations/ranger-storm-plugin-properties/external_admin_password', 'admin')
+    external_ranger_admin_username = default('/configurations/ranger-storm-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
+    external_ranger_admin_password = default('/configurations/ranger-storm-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
+    ranger_env = {}
+    ranger_env['admin_username'] = external_admin_username
+    ranger_env['admin_password'] = external_admin_password
+    ranger_env['ranger_admin_username'] = external_ranger_admin_username
+    ranger_env['ranger_admin_password'] = external_ranger_admin_password
+
+  ranger_plugin_properties = config['configurations']['ranger-storm-plugin-properties']
+  policy_user = storm_user
+  repo_config_password = config['configurations']['ranger-storm-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
+
   xa_audit_db_password = ''
-  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
-    xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
-  repo_config_password = unicode(config['configurations']['ranger-storm-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'])
-  xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
+  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
+    xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
+
+  downloaded_custom_connector = None
   previous_jdbc_jar_name = None
+  driver_curl_source = None
+  driver_curl_target = None
+  previous_jdbc_jar = None
+
+  if has_ranger_admin and stack_supports_ranger_audit_db:
+    xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
+    jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
 
-  if stack_supports_ranger_audit_db:
-    if xa_audit_db_flavor == 'mysql':
-      jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
-      jdbc_driver = "com.mysql.jdbc.Driver"
-    elif xa_audit_db_flavor == 'oracle':
-      jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
-      colon_count = xa_db_host.count(':')
-      if colon_count == 2 or colon_count == 0:
-        audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
-      else:
-        audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
-      jdbc_driver = "oracle.jdbc.OracleDriver"
-    elif xa_audit_db_flavor == 'postgres':
-      jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
-      jdbc_driver = "org.postgresql.Driver"
-    elif xa_audit_db_flavor == 'mssql':
-      jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
-      jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
-    elif xa_audit_db_flavor == 'sqla':
-      jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
-      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
-      audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
-      jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
-
-  downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  driver_curl_target = format("{storm_component_home_dir}/lib/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  previous_jdbc_jar = format("{storm_component_home_dir}/lib/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
-  sql_connector_jar = ''
+    downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
+    driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
+    driver_curl_target = format("{storm_component_home_dir}/lib/{jdbc_jar_name}")
+    previous_jdbc_jar = format("{storm_component_home_dir}/lib/{previous_jdbc_jar_name}")
+    sql_connector_jar = ''
 
   storm_ranger_plugin_config = {
     'username': repo_config_username,
@@ -356,18 +363,20 @@ if has_ranger_admin:
     ranger_storm_keytab = storm_keytab_path
 
   xa_audit_db_is_enabled = False
-  ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
   if xml_configurations_supported and stack_supports_ranger_audit_db:
     xa_audit_db_is_enabled = config['configurations']['ranger-storm-audit']['xasecure.audit.destination.db']
+
   xa_audit_hdfs_is_enabled = default('/configurations/ranger-storm-audit/xasecure.audit.destination.hdfs', False)
-  ssl_keystore_password = unicode(config['configurations']['ranger-storm-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
-  ssl_truststore_password = unicode(config['configurations']['ranger-storm-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
-  credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
+  ssl_keystore_password = config['configurations']['ranger-storm-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
+  ssl_truststore_password = config['configurations']['ranger-storm-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
 
-  #For SQLA explicitly disable audit to DB for Ranger
-  if xa_audit_db_flavor == 'sqla':
+  # for SQLA explicitly disable audit to DB for Ranger
+  if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor.lower() == 'sqla':
     xa_audit_db_is_enabled = False
 
+# ranger storm plugin end section
+
 namenode_hosts = default("/clusterHostInfo/namenode_host", [])
 has_namenode = not len(namenode_hosts) == 0
 

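The heart of the params refactor above is that Ranger plugin wiring is now gated on the plugin-enabled property rather than on the presence of a Ranger admin host, so the plugin can also target an external Ranger. A minimal, self-contained sketch of the new gating and policy-URL normalization, with config as a plain dict and default() stubbed in place of the resource_management helper (whose real signature omits the config argument):

# Sketch only: default() stands in for resource_management's nested-lookup
# helper, which returns a fallback when the '/a/b/c' path is missing.
def default(config, path, fallback):
    node = config
    for key in path.strip('/').split('/'):
        if not isinstance(node, dict) or key not in node:
            return fallback
        node = node[key]
    return node

def ranger_storm_gating(config):
    ranger_admin_hosts = default(config, '/clusterHostInfo/ranger_admin_hosts', [])
    has_ranger_admin = len(ranger_admin_hosts) > 0

    # Enablement no longer depends on has_ranger_admin.
    enabled = default(config,
        '/configurations/ranger-storm-plugin-properties/ranger-storm-plugin-enabled',
        'No').lower() == 'yes'

    policymgr_mgr_url = None
    if enabled:
        policymgr_mgr_url = default(config,
            '/configurations/ranger-storm-security/ranger.plugin.storm.policy.rest.url',
            None)
        # Normalize trailing slashes, as the patch does with rstrip('/').
        if policymgr_mgr_url and policymgr_mgr_url.endswith('/'):
            policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
    return has_ranger_admin, enabled, policymgr_mgr_url

With this shape, enable_ranger_storm can be True while has_ranger_admin is False, which is precisely the external-Ranger case the rest of the patch caters for.
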
http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/setup_ranger_storm.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/setup_ranger_storm.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/setup_ranger_storm.py
index e81d62a..c04496e 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/setup_ranger_storm.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1/package/scripts/setup_ranger_storm.py
@@ -28,7 +28,7 @@ def setup_ranger_storm(upgrade_type=None):
   :param upgrade_type: Upgrade Type such as "rolling" or "nonrolling"
   """
   import params
-  if params.has_ranger_admin and params.security_enabled:
+  if params.enable_ranger_storm and params.security_enabled:
 
     stack_version = None
     if upgrade_type is not None:
@@ -130,4 +130,4 @@ def setup_ranger_storm(upgrade_type=None):
     else:
       Logger.info("Stack does not support core-site.xml creation for Ranger plugin, skipping core-site.xml configurations")
   else:
-    Logger.info('Ranger admin not installed')
+    Logger.info('Ranger Storm plugin is not enabled')

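The same decoupling shows up here: setup_ranger_storm is keyed to the plugin flag instead of admin-host presence. A tiny sketch of the behavioral difference, with a hypothetical stand-in for the params module:

# Hypothetical stand-in for the module-level params import.
class Params(object):
    def __init__(self, has_ranger_admin, enable_ranger_storm, security_enabled):
        self.has_ranger_admin = has_ranger_admin
        self.enable_ranger_storm = enable_ranger_storm
        self.security_enabled = security_enabled

def should_setup(params):
    # New gate from the patch; the old gate was
    # params.has_ranger_admin and params.security_enabled.
    return params.enable_ranger_storm and params.security_enabled

# External Ranger: plugin enabled, no Ranger admin host in the cluster.
# The old gate skipped this case and only logged 'Ranger admin not installed'.
external = Params(has_ranger_admin=False, enable_ranger_storm=True,
                  security_enabled=True)
assert should_setup(external)
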
http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/STORM/1.0.1/configuration/ranger-storm-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/1.0.1/configuration/ranger-storm-plugin-properties.xml b/ambari-server/src/main/resources/common-services/STORM/1.0.1/configuration/ranger-storm-plugin-properties.xml
new file mode 100644
index 0000000..3450970
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/STORM/1.0.1/configuration/ranger-storm-plugin-properties.xml
@@ -0,0 +1,71 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+
+  <property>
+    <name>external_admin_username</name>
+    <value></value>
+    <display-name>External Ranger admin username</display-name>
+    <description>Default Ranger admin username, used to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_admin_password</name>
+    <value></value>
+    <display-name>External Ranger admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Default Ranger admin password, used to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_username</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin username</display-name>
+    <description>Default Ranger Ambari admin username, used to communicate with an external Ranger</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>external_ranger_admin_password</name>
+    <value></value>
+    <display-name>External Ranger Ambari admin password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Default Ranger Ambari admin password, used to communicate with an external Ranger</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+</configuration>
\ No newline at end of file

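These four external_* properties feed the ranger-env fallback added to params_linux.py above: when the cluster manages no Ranger admin, the scripts synthesize a ranger_env-shaped dict from them. A minimal sketch of that wiring, reusing the stubbed default() helper from the earlier sketch:

def build_ranger_env(config, has_ranger_admin, plugin_props_type):
    # Normal case: ranger-env as provided by the cluster configuration.
    ranger_env = default(config, '/configurations/ranger-env', {})
    if not has_ranger_admin:
        # External Ranger: fall back to the external_* plugin properties,
        # with the same defaults the patch uses.
        base = '/configurations/%s/' % plugin_props_type
        ranger_env = {
            'admin_username': default(config, base + 'external_admin_username', 'admin'),
            'admin_password': default(config, base + 'external_admin_password', 'admin'),
            'ranger_admin_username': default(config, base + 'external_ranger_admin_username', 'amb_ranger_admin'),
            'ranger_admin_password': default(config, base + 'external_ranger_admin_password', 'amb_ranger_admin'),
        }
    return ranger_env

# e.g. build_ranger_env(config, has_ranger_admin=False,
#                       plugin_props_type='ranger-storm-plugin-properties')
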
http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
index 177e0e0..653fa0a 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
@@ -35,6 +35,7 @@ from resource_management.libraries.functions.default import default
 from resource_management.libraries import functions
 from resource_management.libraries.functions import is_empty
 from resource_management.libraries.functions.get_architecture import get_architecture
+from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs
 
 import status_params
 
@@ -303,9 +304,6 @@ tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
 #for create_hdfs_directory
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-
-
-
 hdfs_site = config['configurations']['hdfs-site']
 default_fs = config['configurations']['core-site']['fs.defaultFS']
 is_webhdfs_enabled = hdfs_site['dfs.webhdfs.enabled']
@@ -350,12 +348,6 @@ node_label_enable = config['configurations']['yarn-site']['yarn.node-labels.enab
 
 cgroups_dir = "/cgroups_test/cpu"
 
-# ***********************  RANGER PLUGIN CHANGES ***********************
-# ranger host
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
 # hostname of the active HDFS HA Namenode (only used when HA is enabled)
 dfs_ha_namenode_active = default("/configurations/hadoop-env/dfs_ha_initial_namenode_active", None)
 if dfs_ha_namenode_active is not None: 
@@ -386,106 +378,119 @@ if rm_ha_enabled:
     rm_webapp_address = config['configurations']['yarn-site'][rm_webapp_address_property]
     rm_webapp_addresses_list.append(rm_webapp_address)
 
-#ranger yarn properties
-if has_ranger_admin:
-  is_supported_yarn_ranger = config['configurations']['yarn-env']['is_supported_yarn_ranger']
-
-  if is_supported_yarn_ranger:
-    enable_ranger_yarn = (config['configurations']['ranger-yarn-plugin-properties']['ranger-yarn-plugin-enabled'].lower() == 'yes')
-    policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
-    if 'admin-properties' in config['configurations'] and 'policymgr_external_url' in config['configurations']['admin-properties'] and policymgr_mgr_url.endswith('/'):
-      policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
-    xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
-    xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
-    xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
-    xa_audit_db_password = ''
-    if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
-      xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
-    xa_db_host = config['configurations']['admin-properties']['db_host']
-    repo_name = str(config['clusterName']) + '_yarn'
-    repo_name_value = config['configurations']['ranger-yarn-security']['ranger.plugin.yarn.service.name']
-    if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
-      repo_name = repo_name_value
-
-    ranger_env = config['configurations']['ranger-env']
-    ranger_plugin_properties = config['configurations']['ranger-yarn-plugin-properties']
-    policy_user = config['configurations']['ranger-yarn-plugin-properties']['policy_user']
-    yarn_rest_url = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address']  
-
-    ranger_plugin_config = {
-      'username' : config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
-      'password' : unicode(config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']),
-      'yarn.url' : format('{scheme}://{yarn_rest_url}'),
-      'commonNameForCertificate' : config['configurations']['ranger-yarn-plugin-properties']['common.name.for.certificate']
-    }
-
-    yarn_ranger_plugin_repo = {
-      'isEnabled': 'true',
-      'configs': ranger_plugin_config,
-      'description': 'yarn repo',
-      'name': repo_name,
-      'repositoryType': 'yarn',
-      'type': 'yarn',
-      'assetType': '1'
-    }
-
-    if stack_supports_ranger_kerberos:
-      ranger_plugin_config['ambari.service.check.user'] = policy_user
-      ranger_plugin_config['hadoop.security.authentication'] = 'kerberos' if security_enabled else 'simple'
-
-    if stack_supports_ranger_kerberos and security_enabled:
-      ranger_plugin_config['policy.download.auth.users'] = yarn_user
-      ranger_plugin_config['tag.download.auth.users'] = yarn_user
-
-    #For curl command in ranger plugin to get db connector
-    jdk_location = config['hostLevelParams']['jdk_location']
-    java_share_dir = '/usr/share/java'
-    previous_jdbc_jar_name = None
-    if stack_supports_ranger_audit_db:
-      if xa_audit_db_flavor and xa_audit_db_flavor == 'mysql':
-        jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
-        jdbc_driver = "com.mysql.jdbc.Driver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'oracle':
-        jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
-        colon_count = xa_db_host.count(':')
-        if colon_count == 2 or colon_count == 0:
-          audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
-        else:
-          audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
-        jdbc_driver = "oracle.jdbc.OracleDriver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'postgres':
-        jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
-        jdbc_driver = "org.postgresql.Driver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'mssql':
-        jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
-        jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'sqla':
-        jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
-        jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
+# for curl command in ranger plugin to get db connector
+jdk_location = config['hostLevelParams']['jdk_location']
+
+# ranger yarn plugin section start
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+
+# ranger xml_configuration support flag; derived from the stack feature instead of the xml_configurations_supported property in ranger-env
+xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
+
+# ambari-server hostname
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+# ranger yarn plugin enabled property
+enable_ranger_yarn = default("/configurations/ranger-yarn-plugin-properties/ranger-yarn-plugin-enabled", "No")
+enable_ranger_yarn = enable_ranger_yarn.lower() == 'yes'
+
+# ranger yarn-plugin support flag; derived from the stack feature instead of the is_supported_yarn_ranger property in yarn-env
+is_supported_yarn_ranger = check_stack_feature(StackFeature.YARN_RANGER_PLUGIN_SUPPORT, version_for_stack_feature_checks)
+
+# get ranger yarn properties if enable_ranger_yarn is True
+if enable_ranger_yarn and is_supported_yarn_ranger:
+  # get ranger policy url
+  policymgr_mgr_url = config['configurations']['ranger-yarn-security']['ranger.plugin.yarn.policy.rest.url']
+
+  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
+    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+
+  # ranger audit db user
+  xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+
+  xa_audit_db_password = ''
+  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
+    xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
+
+  # ranger yarn service/repository name
+  repo_name = str(config['clusterName']) + '_yarn'
+  repo_name_value = config['configurations']['ranger-yarn-security']['ranger.plugin.yarn.service.name']
+  if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+    repo_name = repo_name_value
+
+  # ranger-env config
+  ranger_env = config['configurations']['ranger-env']
+
+  # build a ranger-env style config from the external Ranger credential properties
+  if not has_ranger_admin:
+    external_admin_username = default('/configurations/ranger-yarn-plugin-properties/external_admin_username', 'admin')
+    external_admin_password = default('/configurations/ranger-yarn-plugin-properties/external_admin_password', 'admin')
+    external_ranger_admin_username = default('/configurations/ranger-yarn-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
+    external_ranger_admin_password = default('/configurations/ranger-yarn-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
+    ranger_env = {}
+    ranger_env['admin_username'] = external_admin_username
+    ranger_env['admin_password'] = external_admin_password
+    ranger_env['ranger_admin_username'] = external_ranger_admin_username
+    ranger_env['ranger_admin_password'] = external_ranger_admin_password
+
+  ranger_plugin_properties = config['configurations']['ranger-yarn-plugin-properties']
+  policy_user = config['configurations']['ranger-yarn-plugin-properties']['policy_user']
+  yarn_rest_url = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address']
+
+  ranger_plugin_config = {
+    'username' : config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
+    'password' : unicode(config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']),
+    'yarn.url' : format('{scheme}://{yarn_rest_url}'),
+    'commonNameForCertificate' : config['configurations']['ranger-yarn-plugin-properties']['common.name.for.certificate']
+  }
+
+  yarn_ranger_plugin_repo = {
+    'isEnabled': 'true',
+    'configs': ranger_plugin_config,
+    'description': 'yarn repo',
+    'name': repo_name,
+    'repositoryType': 'yarn',
+    'type': 'yarn',
+    'assetType': '1'
+  }
+
+  if stack_supports_ranger_kerberos:
+    ranger_plugin_config['ambari.service.check.user'] = policy_user
+    ranger_plugin_config['hadoop.security.authentication'] = 'kerberos' if security_enabled else 'simple'
+
+  if stack_supports_ranger_kerberos and security_enabled:
+    ranger_plugin_config['policy.download.auth.users'] = yarn_user
+    ranger_plugin_config['tag.download.auth.users'] = yarn_user
+
+  downloaded_custom_connector = None
+  previous_jdbc_jar_name = None
+  driver_curl_source = None
+  driver_curl_target = None
+  previous_jdbc_jar = None
+
+  if has_ranger_admin and stack_supports_ranger_audit_db:
+    xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
+    jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
 
     downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
     driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
     driver_curl_target = format("{hadoop_yarn_home}/lib/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
     previous_jdbc_jar = format("{hadoop_yarn_home}/lib/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
 
+  xa_audit_db_is_enabled = False
+  if xml_configurations_supported and stack_supports_ranger_audit_db:
+    xa_audit_db_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.db']
+
+  xa_audit_hdfs_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else False
+  ssl_keystore_password = config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
+  ssl_truststore_password = config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
+
+  # for SQLA explicitly disable audit to DB for Ranger
+  if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor.lower() == 'sqla':
     xa_audit_db_is_enabled = False
-    ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
-    if xml_configurations_supported and stack_supports_ranger_audit_db:
-      xa_audit_db_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.db']
-    xa_audit_hdfs_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
-    ssl_keystore_password = unicode(config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
-    ssl_truststore_password = unicode(config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
-    credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
-
-    #For SQLA explicitly disable audit to DB for Ranger
-    if xa_audit_db_flavor == 'sqla':
-      xa_audit_db_is_enabled = False
+
+# ranger yarn plugin end section

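Both params scripts swap the long per-DB_FLAVOR if/elif chain for a single get_audit_configs(config) call imported from setup_ranger_plugin_xml. The helper's body is not part of this patch; the call sites show it returning (jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver). A hedged reconstruction that only mirrors the chain deleted above (the real implementation may differ), again reusing the stubbed default() helper:

def get_audit_configs_sketch(config):
    flavor = config['configurations']['admin-properties']['DB_FLAVOR'].lower()
    db_host = config['configurations']['admin-properties']['db_host']
    db_name = default(config, '/configurations/admin-properties/audit_db_name',
                      'ranger_audits')

    # (hostLevelParams key fragment, audit JDBC URL, driver class) per flavor.
    simple = {
        'mysql':    ('mysql', 'jdbc:mysql://%s/%s' % (db_host, db_name),
                     'com.mysql.jdbc.Driver'),
        'postgres': ('postgres', 'jdbc:postgresql://%s/%s' % (db_host, db_name),
                     'org.postgresql.Driver'),
        'mssql':    ('mssql', 'jdbc:sqlserver://%s;databaseName=%s' % (db_host, db_name),
                     'com.microsoft.sqlserver.jdbc.SQLServerDriver'),
        'sqla':     ('sqlanywhere', 'jdbc:sqlanywhere:database=%s;host=%s' % (db_name, db_host),
                     'sap.jdbc4.sqlanywhere.IDriver'),
    }
    if flavor == 'oracle':
        key, driver = 'oracle', 'oracle.jdbc.OracleDriver'
        # 0 or 2 colons in db_host selects the '@host' thin form.
        url = ('jdbc:oracle:thin:@%s' if db_host.count(':') in (0, 2)
               else 'jdbc:oracle:thin:@//%s') % db_host
    else:
        key, url, driver = simple[flavor]

    jar = default(config, '/hostLevelParams/custom_%s_jdbc_name' % key, None)
    prev = default(config, '/hostLevelParams/previous_custom_%s_jdbc_name' % key, None)
    return jar, prev, url, driver
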
http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
index 3207f27..f2e6660 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
@@ -115,7 +115,7 @@ class ResourcemanagerDefault(Resourcemanager):
 
     env.set_params(params)
     self.configure(env) # FOR SECURITY
-    if params.has_ranger_admin and params.is_supported_yarn_ranger:
+    if params.enable_ranger_yarn and params.is_supported_yarn_ranger:
       setup_ranger_yarn() #Ranger Yarn Plugin related calls
 
     # wait for active-dir and done-dir to be created by ATS if needed

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/setup_ranger_yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/setup_ranger_yarn.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/setup_ranger_yarn.py
index 6ea7f82..d29e4dc 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/setup_ranger_yarn.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/setup_ranger_yarn.py
@@ -19,7 +19,7 @@ from resource_management.core.logger import Logger
 def setup_ranger_yarn():
   import params
 
-  if params.has_ranger_admin:
+  if params.enable_ranger_yarn:
 
     from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
 
@@ -68,4 +68,4 @@ def setup_ranger_yarn():
                         component_user_keytab=params.rm_keytab if params.security_enabled else None
       )
   else:
-    Logger.info('Ranger admin not installed')
+    Logger.info('Ranger Yarn plugin is not enabled')

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
index a64af73..6801d5a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
@@ -122,7 +122,7 @@
       "name": "ranger_audit_db_support",
       "description": "Ranger Audit to DB support",
       "min_version": "2.2.0.0",
-      "max_version": "2.5.0.0"
+      "max_version": "2.4.99.99"
     },
     {
       "name": "accumulo_kerberos_user_auth",
@@ -334,6 +334,21 @@
       "min_version": "2.6.0.0"
     },
     {
+      "name": "ranger_xml_configuration",
+      "description": "Ranger code base support xml configurations",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "kafka_ranger_plugin_support",
+      "description": "Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "yarn_ranger_plugin_support",
+      "description": "Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)",
+      "min_version": "2.3.0.0"
+    },
+    {
       "name": "ranger_solr_config_support",
       "description": "Showing Ranger solrconfig.xml on UI",
       "min_version": "2.6.0.0"

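The three new entries above are what calls like check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks) in the params diffs resolve against. The max_version tightening from 2.5.0.0 to 2.4.99.99 suggests the bounds are compared inclusively, so ranger_audit_db_support now stops just short of 2.5.0.0. A minimal sketch of that resolution under those assumed inclusive semantics (the real helper lives in resource_management):

# Assumed-inclusive resolution of stack_features.json entries.
FEATURES = [
    {"name": "ranger_xml_configuration", "min_version": "2.3.0.0"},
    {"name": "yarn_ranger_plugin_support", "min_version": "2.3.0.0"},
    {"name": "ranger_audit_db_support",
     "min_version": "2.2.0.0", "max_version": "2.4.99.99"},
]

def _v(version):
    return tuple(int(part) for part in version.split('.'))

def check_stack_feature(name, stack_version):
    for feature in FEATURES:
        if feature["name"] != name:
            continue
        if "min_version" in feature and _v(stack_version) < _v(feature["min_version"]):
            return False
        if "max_version" in feature and _v(stack_version) > _v(feature["max_version"]):
            return False
        return True
    return False

assert check_stack_feature("ranger_xml_configuration", "2.6.0.0")
# 2.5.0.0 now falls outside ranger_audit_db_support, per the tightened max.
assert not check_stack_feature("ranger_audit_db_support", "2.5.0.0")
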
http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/ranger-hbase-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
index 960c751..0de538d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
@@ -26,7 +26,7 @@
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>policy_user</name>
@@ -39,7 +39,7 @@
       </property>
     </depends-on>
     <description>This user must be system user and also present at Ranger admin portal</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger-hbase-plugin-enabled</name>
@@ -56,14 +56,14 @@
         <name>ranger-hbase-plugin-enabled</name>
       </property>
     </depends-on>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>REPOSITORY_CONFIG_USERNAME</name>
     <value>hbase</value>
     <display-name>Ranger repository config user</display-name>
     <description>Used for repository creation on ranger admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>REPOSITORY_CONFIG_PASSWORD</name>
@@ -74,7 +74,7 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>XAAUDIT.DB.IS_ENABLED</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
index c57c5f0..7460d26 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
@@ -17,7 +17,7 @@
     <display-name>Policy user for HDFS</display-name>
     <description>This user must be system user and also present at Ranger
 			admin portal</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>hadoop.rpc.protection</name>
@@ -27,7 +27,7 @@
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>common.name.for.certificate</name>
@@ -36,7 +36,7 @@
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>ranger-hdfs-plugin-enabled</name>
@@ -53,7 +53,7 @@
       <type>boolean</type>
       <overridable>false</overridable>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>REPOSITORY_CONFIG_USERNAME</name>
@@ -61,7 +61,7 @@
     <display-name>Ranger repository config user</display-name>
     <description>Used for repository creation on ranger admin
 		</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>REPOSITORY_CONFIG_PASSWORD</name>
@@ -73,7 +73,7 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>XAAUDIT.DB.IS_ENABLED</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/ranger-hive-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/ranger-hive-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/ranger-hive-plugin-properties.xml
index 830c539..0db5565 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/ranger-hive-plugin-properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/ranger-hive-plugin-properties.xml
@@ -24,13 +24,13 @@
     <value>ambari-qa</value>
     <display-name>Policy user for HIVE</display-name>
     <description>This user must be system user and also present at Ranger admin portal</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>jdbc.driverClassName</name>
     <value>org.apache.hive.jdbc.HiveDriver</value>
     <description>Used for repository creation on ranger admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>common.name.for.certificate</name>
@@ -39,14 +39,14 @@
     <value-attributes>
       <empty-value-valid>true</empty-value-valid>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>REPOSITORY_CONFIG_USERNAME</name>
     <value>hive</value>
     <display-name>Ranger repository config user</display-name>
     <description>Used for repository creation on ranger admin</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>REPOSITORY_CONFIG_PASSWORD</name>
@@ -57,7 +57,7 @@
     <value-attributes>
       <type>password</type>
     </value-attributes>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
   <property>
     <name>XAAUDIT.DB.IS_ENABLED</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1524fd77/ambari-server/src/main/resources/stacks/HDP/2.2/services/KNOX/configuration/ranger-knox-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/KNOX/configuration/ranger-knox-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/KNOX/configuration/ranger-knox-plugin-properties.xml
index d5880dd..ad2b1e4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/KNOX/configuration/ranger-knox-plugin-properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/KNOX/configuration/ranger-knox-plugin-properties.xml
@@ -24,6 +24,6 @@
     <value>/usr/hdp/current/knox-server</value>
     <display-name>Knox Home</display-name>
     <description>Knox home folder</description>
-    <on-ambari-upgrade add="false"/>
+    <on-ambari-upgrade add="true"/>
   </property>
 </configuration>

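Every flip in this last group changes on-ambari-upgrade from add="false" to add="true", meaning the stack-defined property should be injected into existing cluster configs when Ambari itself is upgraded; without it, pre-existing clusters would lack the properties that the external-Ranger path needs. A hedged sketch of that add-if-missing interpretation (assumed semantics; the real merge logic lives in ambari-server):

# Hedged sketch of on-ambari-upgrade add="true": during an Ambari upgrade,
# add the stack-defined property to the cluster config if it is missing.
import xml.etree.ElementTree as ET

def properties_to_add(stack_xml, cluster_config):
    to_add = {}
    for prop in ET.fromstring(stack_xml).findall('property'):
        name = prop.findtext('name')
        upgrade = prop.find('on-ambari-upgrade')
        if upgrade is not None and upgrade.get('add') == 'true' \
           and name not in cluster_config:
            to_add[name] = prop.findtext('value') or ''
    return to_add

# e.g. with the ranger-knox-plugin-properties.xml above and an old cluster
# config lacking KNOX_HOME, properties_to_add(...) would now return it.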

[23/50] [abbrv] ambari git commit: AMBARI-19337. Ambari has some spelling mistakes in YARN proxyuser properties in many places (Jay SenSharma via smohanty)

Posted by nc...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json.orig
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json.orig b/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json.orig
new file mode 100644
index 0000000..147c1c0
--- /dev/null
+++ b/ambari-web/app/assets/data/stacks/HDP-2.1/service_components.json.orig
@@ -0,0 +1,3170 @@
+
+{
+  "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services?fields=StackServices/*,components/*,components/dependencies/Dependencies/scope,artifacts/Artifacts/artifact_name",
+  "items" : [
+    {
+      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/AMBARI_METRICS",
+      "StackServices" : {
+        "comments" : "A system for metrics collection that provides storage and retrieval capability for metrics collected from the cluster\n      ",
+        "custom_commands" : [ ],
+        "display_name" : "Ambari Metrics",
+        "required_services" : [
+          "ZOOKEEPER"
+        ],
+        "service_check_supported" : true,
+        "service_name" : "AMBARI_METRICS",
+        "service_version" : "0.1.0",
+        "stack_name" : "HDP",
+        "stack_version" : "2.1",
+        "user_name" : null,
+        "config_types" : {
+          "ams-env" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "ams-hbase-env" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "ams-hbase-log4j" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "ams-hbase-policy" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "true"
+            }
+          },
+          "ams-hbase-security-site" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "ams-hbase-site" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "ams-log4j" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "ams-site" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          }
+        },
+        "kerberos_descriptor" : {
+          "components" : [
+            {
+              "identities" : [
+                {
+                  "principal" : {
+                    "configuration" : "ams-hbase-security-site/hbase.master.kerberos.principal",
+                    "type" : "service",
+                    "local_username" : "${ams-env/ambari_metrics_user}",
+                    "value" : "amshbase/_HOST@${realm}"
+                  },
+                  "name" : "ams_hbase_master_hbase",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : "r",
+                      "name" : "${ams-env/ambari_metrics_user}"
+                    },
+                    "file" : "${keytab_dir}/ams-hbase.master.keytab",
+                    "configuration" : "ams-hbase-security-site/hbase.master.keytab.file",
+                    "group" : {
+                      "access" : "",
+                      "name" : "${cluster-env/user_group}"
+                    }
+                  }
+                },
+                {
+                  "principal" : {
+                    "configuration" : "ams-hbase-security-site/hbase.regionserver.kerberos.principal",
+                    "type" : "service",
+                    "local_username" : "${ams-env/ambari_metrics_user}",
+                    "value" : "amshbase/_HOST@${realm}"
+                  },
+                  "name" : "ams_hbase_regionserver_hbase",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : "r",
+                      "name" : "${ams-env/ambari_metrics_user}"
+                    },
+                    "file" : "${keytab_dir}/ams-hbase.regionserver.keytab",
+                    "configuration" : "ams-hbase-security-site/hbase.regionserver.keytab.file",
+                    "group" : {
+                      "access" : "",
+                      "name" : "${cluster-env/user_group}"
+                    }
+                  }
+                },
+                {
+                  "principal" : {
+                    "configuration" : "ams-hbase-security-site/hbase.myclient.principal",
+                    "type" : "service",
+                    "local_username" : "${ams-env/ambari_metrics_user}",
+                    "value" : "amshbase/_HOST@${realm}"
+                  },
+                  "name" : "ams_collector",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : "r",
+                      "name" : "${ams-env/ambari_metrics_user}"
+                    },
+                    "file" : "${keytab_dir}/ams.collector.keytab",
+                    "configuration" : "ams-hbase-security-site/hbase.myclient.keytab",
+                    "group" : {
+                      "access" : "",
+                      "name" : "${cluster-env/user_group}"
+                    }
+                  }
+                },
+                {
+                  "principal" : {
+                    "configuration" : "ams-hbase-security-site/ams.zookeeper.principal",
+                    "type" : "service",
+                    "local_username" : "${ams-env/ambari_metrics_user}",
+                    "value" : "zookeeper/_HOST@${realm}"
+                  },
+                  "name" : "ams_zookeeper",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : "r",
+                      "name" : "${ams-env/ambari_metrics_user}"
+                    },
+                    "file" : "${keytab_dir}/zk.service.ams.keytab",
+                    "configuration" : "ams-hbase-security-site/ams.zookeeper.keytab",
+                    "group" : {
+                      "access" : "",
+                      "name" : "${cluster-env/user_group}"
+                    }
+                  }
+                }
+              ],
+              "configurations" : [
+                {
+                  "ams-hbase-security-site" : {
+                    "hbase.coprocessor.master.classes" : "org.apache.hadoop.hbase.security.access.AccessController",
+                    "hadoop.security.authentication" : "kerberos",
+                    "hbase.security.authentication" : "kerberos",
+                    "hbase.coprocessor.region.classes" : "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.AccessController",
+                    "hbase.security.authorization" : "true",
+                    "hbase.zookeeper.property.kerberos.removeRealmFromPrincipal" : "true",
+                    "hbase.zookeeper.property.jaasLoginRenew" : "3600000",
+                    "hbase.zookeeper.property.authProvider.1" : "org.apache.zookeeper.server.auth.SASLAuthenticationProvider",
+                    "hbase.zookeeper.property.kerberos.removeHostFromPrincipal" : "true"
+                  }
+                },
+                {
+                  "ams-hbase-site": {
+                    "zookeeper.znode.parent": "/ams-hbase-secure"
+                  }
+                }
+              ],
+              "name" : "METRICS_COLLECTOR"
+            }
+          ],
+          "identities" : [
+            {
+              "name" : "/spnego"
+            },
+            {
+              "name" : "/hdfs"
+            }
+          ],
+          "name" : "AMBARI_METRICS"
+        }
+      },
+      "components" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/AMBARI_METRICS/components/METRICS_COLLECTOR",
+          "StackServiceComponents" : {
+            "cardinality" : "1",
+            "component_category" : "MASTER",
+            "component_name" : "METRICS_COLLECTOR",
+            "custom_commands" : [ ],
+            "display_name" : "Metrics Collector",
+            "is_client" : false,
+            "is_master" : true,
+            "service_name" : "AMBARI_METRICS",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/AMBARI_METRICS/components/METRICS_COLLECTOR/dependencies/ZOOKEEPER_SERVER",
+              "Dependencies" : {
+                "component_name" : "ZOOKEEPER_SERVER",
+                "dependent_component_name" : "METRICS_COLLECTOR",
+                "dependent_service_name" : "AMBARI_METRICS",
+                "scope" : "cluster",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            }
+          ]
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/AMBARI_METRICS/components/METRICS_MONITOR",
+          "StackServiceComponents" : {
+            "cardinality" : "ALL",
+            "component_category" : "SLAVE",
+            "component_name" : "METRICS_MONITOR",
+            "custom_commands" : [ ],
+            "display_name" : "Metrics Monitor",
+            "is_client" : false,
+            "is_master" : false,
+            "service_name" : "AMBARI_METRICS",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "auto_deploy" : {
+            "enabled" : true
+          },
+          "dependencies" : [ ]
+        }
+      ],
+      "artifacts" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/AMBARI_METRICS/artifacts/kerberos_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "kerberos_descriptor",
+            "service_name" : "AMBARI_METRICS",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/AMBARI_METRICS/artifacts/metrics_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "metrics_descriptor",
+            "service_name" : "AMBARI_METRICS",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        }
+      ]
+    },
+    {
+      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/FALCON",
+      "StackServices" : {
+        "comments" : "Data management and processing platform",
+        "custom_commands" : [ ],
+        "display_name" : "Falcon",
+        "required_services" : [
+          "OOZIE"
+        ],
+        "service_check_supported" : true,
+        "service_name" : "FALCON",
+        "service_version" : "0.5.0.2.1",
+        "stack_name" : "HDP",
+        "stack_version" : "2.1",
+        "user_name" : null,
+        "config_types" : {
+          "falcon-env" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "falcon-runtime.properties" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "falcon-startup.properties" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          }
+        },
+        "kerberos_descriptor" : {
+          "components" : [
+            {
+              "identities" : [
+                {
+                  "principal" : {
+                    "configuration" : "falcon-startup.properties/*.falcon.service.authentication.kerberos.principal",
+                    "type" : "service",
+                    "local_username" : "${falcon-env/falcon_user}",
+                    "value" : "falcon/_HOST@${realm}"
+                  },
+                  "name" : "falcon_server",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : "r",
+                      "name" : "${falcon-env/falcon_user}"
+                    },
+                    "file" : "${keytab_dir}/falcon.service.keytab",
+                    "configuration" : "falcon-startup.properties/*.falcon.service.authentication.kerberos.keytab",
+                    "group" : {
+                      "access" : "",
+                      "name" : "${cluster-env/user_group}"
+                    }
+                  }
+                },
+                {
+                  "principal" : {
+                    "configuration" : "falcon-startup.properties/*.falcon.http.authentication.kerberos.principal",
+                    "type" : "service",
+                    "local_username" : null,
+                    "value" : "HTTP/_HOST@${realm}"
+                  },
+                  "name" : "/spnego",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : null,
+                      "name" : null
+                    },
+                    "file" : null,
+                    "configuration" : "falcon-startup.properties/*.falcon.http.authentication.kerberos.keytab",
+                    "group" : {
+                      "access" : null,
+                      "name" : null
+                    }
+                  }
+                }
+              ],
+              "name" : "FALCON_SERVER"
+            }
+          ],
+          "configurations" : [
+            {
+              "falcon-startup.properties" : {
+                "*.dfs.namenode.kerberos.principal" : "nn/_HOST@${realm}",
+                "*.falcon.http.authentication.type" : "kerberos",
+                "*.falcon.authentication.type" : "kerberos"
+              }
+            }
+          ],
+          "identities" : [
+            {
+              "name" : "/spnego"
+            },
+            {
+              "name" : "/smokeuser"
+            },
+            {
+              "name" : "/hdfs"
+            }
+          ],
+          "name" : "FALCON"
+        }
+      },
+      "components" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/FALCON/components/FALCON_CLIENT",
+          "StackServiceComponents" : {
+            "cardinality" : "1+",
+            "component_category" : "CLIENT",
+            "component_name" : "FALCON_CLIENT",
+            "custom_commands" : [ ],
+            "display_name" : "Falcon Client",
+            "is_client" : true,
+            "is_master" : false,
+            "service_name" : "FALCON",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [ ]
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/FALCON/components/FALCON_SERVER",
+          "StackServiceComponents" : {
+            "cardinality" : "1",
+            "component_category" : "MASTER",
+            "component_name" : "FALCON_SERVER",
+            "custom_commands" : [ ],
+            "display_name" : "Falcon Server",
+            "is_client" : false,
+            "is_master" : true,
+            "service_name" : "FALCON",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/FALCON/components/FALCON_SERVER/dependencies/OOZIE_CLIENT",
+              "Dependencies" : {
+                "component_name" : "OOZIE_CLIENT",
+                "dependent_component_name" : "FALCON_SERVER",
+                "dependent_service_name" : "FALCON",
+                "scope" : "cluster",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            },
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/FALCON/components/FALCON_SERVER/dependencies/OOZIE_SERVER",
+              "Dependencies" : {
+                "component_name" : "OOZIE_SERVER",
+                "dependent_component_name" : "FALCON_SERVER",
+                "dependent_service_name" : "FALCON",
+                "scope" : "cluster",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            }
+          ]
+        }
+      ],
+      "artifacts" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/FALCON/artifacts/kerberos_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "kerberos_descriptor",
+            "service_name" : "FALCON",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/FALCON/artifacts/metrics_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "metrics_descriptor",
+            "service_name" : "FALCON",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        }
+      ]
+    },
+    {
+      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/FLUME",
+      "StackServices" : {
+        "comments" : "A distributed service for collecting, aggregating, and moving large amounts of streaming data into HDFS",
+        "custom_commands" : [ ],
+        "display_name" : "Flume",
+        "required_services" : [
+          "HDFS"
+        ],
+        "service_check_supported" : true,
+        "service_name" : "FLUME",
+        "service_version" : "1.4.0.2.1",
+        "stack_name" : "HDP",
+        "stack_version" : "2.1",
+        "user_name" : null,
+        "config_types" : {
+          "flume-conf" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "flume-env" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          }
+        }
+      },
+      "components" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/FLUME/components/FLUME_HANDLER",
+          "StackServiceComponents" : {
+            "cardinality" : "1+",
+            "component_category" : "SLAVE",
+            "component_name" : "FLUME_HANDLER",
+            "custom_commands" : [ ],
+            "display_name" : "Flume",
+            "is_client" : false,
+            "is_master" : false,
+            "service_name" : "FLUME",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [ ]
+        }
+      ],
+      "artifacts" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/FLUME/artifacts/metrics_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "metrics_descriptor",
+            "service_name" : "FLUME",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        }
+      ]
+    },
+    {
+      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/GANGLIA",
+      "StackServices" : {
+        "comments" : "Ganglia Metrics Collection system (<a href=\"http://oss.oetiker.ch/rrdtool/\" target=\"_blank\">RRDTool</a> will be installed too)",
+        "custom_commands" : [ ],
+        "display_name" : "Ganglia",
+        "required_services" : [ ],
+        "service_check_supported" : false,
+        "service_name" : "GANGLIA",
+        "service_version" : "3.5.0",
+        "stack_name" : "HDP",
+        "stack_version" : "2.1",
+        "user_name" : null,
+        "config_types" : {
+          "ganglia-env" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          }
+        }
+      },
+      "components" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/GANGLIA/components/GANGLIA_MONITOR",
+          "StackServiceComponents" : {
+            "cardinality" : "ALL",
+            "component_category" : "SLAVE",
+            "component_name" : "GANGLIA_MONITOR",
+            "custom_commands" : [ ],
+            "display_name" : "Ganglia Monitor",
+            "is_client" : false,
+            "is_master" : false,
+            "service_name" : "GANGLIA",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "auto_deploy" : {
+            "enabled" : true
+          },
+          "dependencies" : [ ]
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/GANGLIA/components/GANGLIA_SERVER",
+          "StackServiceComponents" : {
+            "cardinality" : "1",
+            "component_category" : "MASTER",
+            "component_name" : "GANGLIA_SERVER",
+            "custom_commands" : [ ],
+            "display_name" : "Ganglia Server",
+            "is_client" : false,
+            "is_master" : true,
+            "service_name" : "GANGLIA",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [ ]
+        }
+      ],
+      "artifacts" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/GANGLIA/artifacts/metrics_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "metrics_descriptor",
+            "service_name" : "GANGLIA",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        }
+      ]
+    },
+    {
+      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HBASE",
+      "StackServices" : {
+        "comments" : "Non-relational distributed database and centralized service for configuration management &\n        synchronization\n      ",
+        "custom_commands" : [ ],
+        "display_name" : "HBase",
+        "required_services" : [
+          "ZOOKEEPER",
+          "HDFS"
+        ],
+        "service_check_supported" : true,
+        "service_name" : "HBASE",
+        "service_version" : "0.98.0.2.1",
+        "stack_name" : "HDP",
+        "stack_version" : "2.1",
+        "user_name" : null,
+        "config_types" : {
+          "hbase-env" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "hbase-log4j" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "hbase-policy" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "true"
+            }
+          },
+          "hbase-site" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "true"
+            }
+          }
+        },
+        "kerberos_descriptor" : {
+          "components" : [
+            {
+              "identities" : [
+                {
+                  "principal" : {
+                    "configuration" : "hbase-site/hbase.regionserver.kerberos.principal",
+                    "type" : "service",
+                    "local_username" : "${hbase-env/hbase_user}",
+                    "value" : "hbase/_HOST@${realm}"
+                  },
+                  "name" : "hbase_regionserver_hbase",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : "r",
+                      "name" : "${hbase-env/hbase_user}"
+                    },
+                    "file" : "${keytab_dir}/hbase.service.keytab",
+                    "configuration" : "hbase-site/hbase.regionserver.keytab.file",
+                    "group" : {
+                      "access" : "",
+                      "name" : "${cluster-env/user_group}"
+                    }
+                  }
+                }
+              ],
+              "name" : "HBASE_REGIONSERVER"
+            },
+            {
+              "identities" : [
+                {
+                  "principal" : {
+                    "configuration" : "hbase-site/hbase.master.kerberos.principal",
+                    "type" : "service",
+                    "local_username" : "${hbase-env/hbase_user}",
+                    "value" : "hbase/_HOST@${realm}"
+                  },
+                  "name" : "hbase_master_hbase",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : "r",
+                      "name" : "${hbase-env/hbase_user}"
+                    },
+                    "file" : "${keytab_dir}/hbase.service.keytab",
+                    "configuration" : "hbase-site/hbase.master.keytab.file",
+                    "group" : {
+                      "access" : "",
+                      "name" : "${cluster-env/user_group}"
+                    }
+                  }
+                }
+              ],
+              "name" : "HBASE_MASTER"
+            }
+          ],
+          "configurations" : [
+            {
+              "hbase-site" : {
+                "hbase.coprocessor.master.classes" : "org.apache.hadoop.hbase.security.access.AccessController",
+                "hbase.security.authentication" : "kerberos",
+                "hbase.coprocessor.region.classes" : "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,org.apache.hadoop.hbase.security.access.AccessController",
+                "hbase.security.authorization" : "true",
+                "hbase.bulkload.staging.dir" : "/apps/hbase/staging",
+                "zookeeper.znode.parent" : "/hbase-secure"
+              }
+            }
+          ],
+          "identities" : [
+            {
+              "name" : "/spnego"
+            },
+            {
+              "name" : "/hdfs"
+            },
+            {
+              "principal" : {
+                "configuration" : "hbase-env/hbase_principal_name",
+                "type" : "user",
+                "local_username" : "${hbase-env/hbase_user}",
+                "value" : "${hbase-env/hbase_user}@${realm}"
+              },
+              "name" : "hbase",
+              "keytab" : {
+                "owner" : {
+                  "access" : "r",
+                  "name" : "${hbase-env/hbase_user}"
+                },
+                "file" : "${keytab_dir}/hbase.headless.keytab",
+                "configuration" : "hbase-env/hbase_user_keytab",
+                "group" : {
+                  "access" : "r",
+                  "name" : "${cluster-env/user_group}"
+                }
+              }
+            },
+            {
+              "name" : "/smokeuser"
+            }
+          ],
+          "name" : "HBASE"
+        }
+      },
+      "components" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HBASE/components/HBASE_CLIENT",
+          "StackServiceComponents" : {
+            "cardinality" : "1+",
+            "component_category" : "CLIENT",
+            "component_name" : "HBASE_CLIENT",
+            "custom_commands" : [ ],
+            "display_name" : "HBase Client",
+            "is_client" : true,
+            "is_master" : false,
+            "service_name" : "HBASE",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [ ]
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HBASE/components/HBASE_MASTER",
+          "StackServiceComponents" : {
+            "cardinality" : "1+",
+            "component_category" : "MASTER",
+            "component_name" : "HBASE_MASTER",
+            "custom_commands" : [
+              "DECOMMISSION"
+            ],
+            "display_name" : "HBase Master",
+            "is_client" : false,
+            "is_master" : true,
+            "service_name" : "HBASE",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HBASE/components/HBASE_MASTER/dependencies/HDFS_CLIENT",
+              "Dependencies" : {
+                "component_name" : "HDFS_CLIENT",
+                "dependent_component_name" : "HBASE_MASTER",
+                "dependent_service_name" : "HBASE",
+                "scope" : "host",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            },
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HBASE/components/HBASE_MASTER/dependencies/ZOOKEEPER_SERVER",
+              "Dependencies" : {
+                "component_name" : "ZOOKEEPER_SERVER",
+                "dependent_component_name" : "HBASE_MASTER",
+                "dependent_service_name" : "HBASE",
+                "scope" : "cluster",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            }
+          ]
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HBASE/components/HBASE_REGIONSERVER",
+          "StackServiceComponents" : {
+            "cardinality" : "1+",
+            "component_category" : "SLAVE",
+            "component_name" : "HBASE_REGIONSERVER",
+            "custom_commands" : [ ],
+            "display_name" : "RegionServer",
+            "is_client" : false,
+            "is_master" : false,
+            "service_name" : "HBASE",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [ ]
+        }
+      ],
+      "artifacts" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HBASE/artifacts/kerberos_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "kerberos_descriptor",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HBASE/artifacts/metrics_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "metrics_descriptor",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HBASE/artifacts/widgets_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "widgets_descriptor",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        }
+      ]
+    },
+    {
+      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS",
+      "StackServices" : {
+        "comments" : "Apache Hadoop Distributed File System",
+        "custom_commands" : [ ],
+        "display_name" : "HDFS",
+        "required_services" : [
+          "ZOOKEEPER"
+        ],
+        "service_check_supported" : true,
+        "service_name" : "HDFS",
+        "service_version" : "2.4.0.2.1",
+        "stack_name" : "HDP",
+        "stack_version" : "2.1",
+        "user_name" : null,
+        "config_types" : {
+          "core-site" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "true"
+            }
+          },
+          "hadoop-env" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "hadoop-policy" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "true"
+            }
+          },
+          "hdfs-log4j" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "hdfs-site" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "true"
+            }
+          }
+        },
+        "kerberos_descriptor" : {
+          "auth_to_local_properties" : [
+            "core-site/hadoop.security.auth_to_local"
+          ],
+          "components" : [
+            {
+              "identities" : [
+                {
+                  "principal" : {
+                    "configuration" : "hdfs-site/dfs.secondary.namenode.kerberos.principal",
+                    "type" : "service",
+                    "local_username" : "${hadoop-env/hdfs_user}",
+                    "value" : "nn/_HOST@${realm}"
+                  },
+                  "name" : "secondary_namenode_nn",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : "r",
+                      "name" : "${hadoop-env/hdfs_user}"
+                    },
+                    "file" : "${keytab_dir}/nn.service.keytab",
+                    "configuration" : "hdfs-site/dfs.secondary.namenode.keytab.file",
+                    "group" : {
+                      "access" : "",
+                      "name" : "${cluster-env/user_group}"
+                    }
+                  }
+                },
+                {
+                  "principal" : {
+                    "configuration" : "hdfs-site/dfs.secondary.namenode.kerberos.internal.spnego.principal",
+                    "type" : "service",
+                    "local_username" : null,
+                    "value" : null
+                  },
+                  "name" : "/spnego"
+                }
+              ],
+              "name" : "SECONDARY_NAMENODE"
+            },
+            {
+              "identities" : [
+                {
+                  "principal" : {
+                    "configuration" : "hdfs-site/dfs.datanode.kerberos.principal",
+                    "type" : "service",
+                    "local_username" : "${hadoop-env/hdfs_user}",
+                    "value" : "dn/_HOST@${realm}"
+                  },
+                  "name" : "datanode_dn",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : "r",
+                      "name" : "${hadoop-env/hdfs_user}"
+                    },
+                    "file" : "${keytab_dir}/dn.service.keytab",
+                    "configuration" : "hdfs-site/dfs.datanode.keytab.file",
+                    "group" : {
+                      "access" : "",
+                      "name" : "${cluster-env/user_group}"
+                    }
+                  }
+                }
+              ],
+              "configurations" : [
+                {
+                  "hdfs-site" : {
+                    "dfs.datanode.address" : "0.0.0.0:1019",
+                    "dfs.datanode.http.address" : "0.0.0.0:1022"
+                  }
+                }
+              ],
+              "name" : "DATANODE"
+            },
+            {
+              "identities" : [
+                {
+                  "principal" : {
+                    "configuration" : "hdfs-site/nfs.kerberos.principal",
+                    "type" : "service",
+                    "local_username" : "${hadoop-env/hdfs_user}",
+                    "value" : "nfs/_HOST@${realm}"
+                  },
+                  "name" : "nfsgateway",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : "r",
+                      "name" : "${hadoop-env/hdfs_user}"
+                    },
+                    "file" : "${keytab_dir}/nfs.service.keytab",
+                    "configuration" : "hdfs-site/nfs.keytab.file",
+                    "group" : {
+                      "access" : "",
+                      "name" : "${cluster-env/user_group}"
+                    }
+                  }
+                }
+              ],
+              "name" : "NFS_GATEWAY"
+            },
+            {
+              "identities" : [
+                {
+                  "principal" : {
+                    "configuration" : "hdfs-site/dfs.journalnode.kerberos.principal",
+                    "type" : "service",
+                    "local_username" : "${hadoop-env/hdfs_user}",
+                    "value" : "jn/_HOST@${realm}"
+                  },
+                  "name" : "journalnode_jn",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : "r",
+                      "name" : "${hadoop-env/hdfs_user}"
+                    },
+                    "file" : "${keytab_dir}/jn.service.keytab",
+                    "configuration" : "hdfs-site/dfs.journalnode.keytab.file",
+                    "group" : {
+                      "access" : "",
+                      "name" : "${cluster-env/user_group}"
+                    }
+                  }
+                },
+                {
+                  "principal" : {
+                    "configuration" : "hdfs-site/dfs.journalnode.kerberos.internal.spnego.principal",
+                    "type" : "service",
+                    "local_username" : null,
+                    "value" : null
+                  },
+                  "name" : "/spnego"
+                }
+              ],
+              "name" : "JOURNALNODE"
+            },
+            {
+              "identities" : [
+                {
+                  "principal" : {
+                    "configuration" : "hdfs-site/dfs.namenode.kerberos.principal",
+                    "type" : "service",
+                    "local_username" : "${hadoop-env/hdfs_user}",
+                    "value" : "nn/_HOST@${realm}"
+                  },
+                  "name" : "namenode_nn",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : "r",
+                      "name" : "${hadoop-env/hdfs_user}"
+                    },
+                    "file" : "${keytab_dir}/nn.service.keytab",
+                    "configuration" : "hdfs-site/dfs.namenode.keytab.file",
+                    "group" : {
+                      "access" : "",
+                      "name" : "${cluster-env/user_group}"
+                    }
+                  }
+                },
+                {
+                  "principal" : {
+                    "configuration" : "hdfs-site/dfs.namenode.kerberos.internal.spnego.principal",
+                    "type" : "service",
+                    "local_username" : null,
+                    "value" : null
+                  },
+                  "name" : "/spnego"
+                }
+              ],
+              "configurations" : [
+                {
+                  "hdfs-site" : {
+                    "dfs.block.access.token.enable" : "true"
+                  }
+                }
+              ],
+              "name" : "NAMENODE"
+            }
+          ],
+          "configurations" : [
+            {
+              "core-site" : {
+                "hadoop.http.authentication.cookie.domain" : "",
+                "hadoop.security.authentication" : "kerberos",
+                "hadoop.http.authentication.signer.secret.provider.object" : "",
+                "hadoop.http.authentication.kerberos.name.rules" : "",
+                "hadoop.security.auth_to_local" : "",
+                "hadoop.http.authentication.token.validity" : "",
+                "hadoop.rpc.protection" : "authentication",
+                "hadoop.http.authentication.cookie.path" : "",
+                "hadoop.security.authorization" : "true",
+                "hadoop.http.authentication.type" : "simple",
+                "hadoop.http.authentication.signature.secret.file" : "",
+                "hadoop.http.authentication.signature.secret" : "",
+                "hadoop.http.filter.initializers" : "",
+                "hadoop.http.authentication.signer.secret.provider" : "",
+                "hadoop.proxyuser.HTTP.groups" : "${hadoop-env/proxyuser_group}"
+              }
+            }
+          ],
+          "identities" : [
+            {
+              "principal" : {
+                "configuration" : "hdfs-site/dfs.web.authentication.kerberos.principal",
+                "type" : "service",
+                "local_username" : null,
+                "value" : null
+              },
+              "name" : "/spnego",
+              "keytab" : {
+                "owner" : {
+                  "access" : null,
+                  "name" : null
+                },
+                "file" : null,
+                "configuration" : "hdfs-site/dfs.web.authentication.kerberos.keytab",
+                "group" : {
+                  "access" : null,
+                  "name" : null
+                }
+              }
+            },
+            {
+              "name" : "/smokeuser"
+            },
+            {
+              "name" : "/hdfs"
+            }
+          ],
+          "name" : "HDFS"
+        }
+      },
+      "components" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS/components/DATANODE",
+          "StackServiceComponents" : {
+            "cardinality" : "1+",
+            "component_category" : "SLAVE",
+            "component_name" : "DATANODE",
+            "custom_commands" : [ ],
+            "display_name" : "DataNode",
+            "is_client" : false,
+            "is_master" : false,
+            "service_name" : "HDFS",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [ ]
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS/components/HDFS_CLIENT",
+          "StackServiceComponents" : {
+            "cardinality" : "1+",
+            "component_category" : "CLIENT",
+            "component_name" : "HDFS_CLIENT",
+            "custom_commands" : [ ],
+            "display_name" : "HDFS Client",
+            "is_client" : true,
+            "is_master" : false,
+            "service_name" : "HDFS",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [ ]
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS/components/JOURNALNODE",
+          "StackServiceComponents" : {
+            "cardinality" : "0+",
+            "component_category" : "SLAVE",
+            "component_name" : "JOURNALNODE",
+            "custom_commands" : [ ],
+            "display_name" : "JournalNode",
+            "is_client" : false,
+            "is_master" : false,
+            "service_name" : "HDFS",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS/components/JOURNALNODE/dependencies/HDFS_CLIENT",
+              "Dependencies" : {
+                "component_name" : "HDFS_CLIENT",
+                "dependent_component_name" : "JOURNALNODE",
+                "dependent_service_name" : "HDFS",
+                "scope" : "host",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            }
+          ]
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS/components/NAMENODE",
+          "StackServiceComponents" : {
+            "cardinality" : "1-2",
+            "component_category" : "MASTER",
+            "component_name" : "NAMENODE",
+            "custom_commands" : [
+              "DECOMMISSION",
+              "REBALANCEHDFS"
+            ],
+            "display_name" : "NameNode",
+            "is_client" : false,
+            "is_master" : true,
+            "service_name" : "HDFS",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [ ]
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS/components/SECONDARY_NAMENODE",
+          "StackServiceComponents" : {
+            "cardinality" : "1",
+            "component_category" : "MASTER",
+            "component_name" : "SECONDARY_NAMENODE",
+            "custom_commands" : [ ],
+            "display_name" : "SNameNode",
+            "is_client" : false,
+            "is_master" : true,
+            "service_name" : "HDFS",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [ ]
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS/components/ZKFC",
+          "StackServiceComponents" : {
+            "cardinality" : "0+",
+            "component_category" : "SLAVE",
+            "component_name" : "ZKFC",
+            "custom_commands" : [ ],
+            "display_name" : "ZKFailoverController",
+            "is_client" : false,
+            "is_master" : false,
+            "service_name" : "HDFS",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [ ]
+        }
+      ],
+      "artifacts" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS/artifacts/kerberos_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "kerberos_descriptor",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS/artifacts/metrics_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "metrics_descriptor",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HDFS/artifacts/widgets_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "widgets_descriptor",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        }
+      ]
+    },
+    {
+      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE",
+      "StackServices" : {
+        "comments" : "Data warehouse system for ad-hoc queries & analysis of large datasets and table & storage management service",
+        "custom_commands" : [ ],
+        "display_name" : "Hive",
+        "required_services" : [
+          "ZOOKEEPER",
+          "YARN",
+          "TEZ"
+        ],
+        "service_check_supported" : true,
+        "service_name" : "HIVE",
+        "service_version" : "0.13.0.2.1",
+        "stack_name" : "HDP",
+        "stack_version" : "2.1",
+        "user_name" : null,
+        "config_types" : {
+          "hcat-env" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "hive-env" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "hive-exec-log4j" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "hive-log4j" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "hive-site" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "true"
+            }
+          },
+          "webhcat-env" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "webhcat-log4j" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "webhcat-site" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "true"
+            }
+          }
+        },
+        "kerberos_descriptor" : {
+          "components" : [
+            {
+              "identities" : [
+                {
+                  "principal" : {
+                    "configuration" : "hive-site/hive.server2.authentication.kerberos.principal",
+                    "type" : "service",
+                    "local_username" : "${hive-env/hive_user}",
+                    "value" : "hive/_HOST@${realm}"
+                  },
+                  "name" : "hive_server_hive",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : "r",
+                      "name" : "${hive-env/hive_user}"
+                    },
+                    "file" : "${keytab_dir}/hive.service.keytab",
+                    "configuration" : "hive-site/hive.server2.authentication.kerberos.keytab",
+                    "group" : {
+                      "access" : "",
+                      "name" : "${cluster-env/user_group}"
+                    }
+                  }
+                },
+                {
+                  "principal" : {
+                    "configuration" : "hive-site/hive.server2.authentication.spnego.principal",
+                    "type" : "service",
+                    "local_username" : null,
+                    "value" : null
+                  },
+                  "name" : "/spnego",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : null,
+                      "name" : null
+                    },
+                    "file" : null,
+                    "configuration" : "hive-site/hive.server2.authentication.spnego.keytab",
+                    "group" : {
+                      "access" : null,
+                      "name" : null
+                    }
+                  }
+                }
+              ],
+              "name" : "HIVE_SERVER"
+            },
+            {
+              "identities" : [
+                {
+                  "principal" : {
+                    "configuration" : "hive-site/hive.metastore.kerberos.principal",
+                    "type" : "service",
+                    "local_username" : "${hive-env/hive_user}",
+                    "value" : "hive/_HOST@${realm}"
+                  },
+                  "name" : "hive_metastore_hive",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : "r",
+                      "name" : "${hive-env/hive_user}"
+                    },
+                    "file" : "${keytab_dir}/hive.service.keytab",
+                    "configuration" : "hive-site/hive.metastore.kerberos.keytab.file",
+                    "group" : {
+                      "access" : "",
+                      "name" : "${cluster-env/user_group}"
+                    }
+                  }
+                }
+              ],
+              "name" : "HIVE_METASTORE"
+            },
+            {
+              "identities" : [
+                {
+                  "principal" : {
+                    "configuration" : "webhcat-site/templeton.kerberos.principal",
+                    "type" : "service",
+                    "local_username" : null,
+                    "value" : null
+                  },
+                  "name" : "/spnego",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : null,
+                      "name" : null
+                    },
+                    "file" : null,
+                    "configuration" : "webhcat-site/templeton.kerberos.keytab",
+                    "group" : {
+                      "access" : null,
+                      "name" : null
+                    }
+                  }
+                }
+              ],
+              "name" : "WEBHCAT_SERVER"
+            }
+          ],
+          "configurations" : [
+            {
+              "hive-site" : {
+                "hive.metastore.sasl.enabled" : "true",
+                "hive.server2.authentication" : "KERBEROS",
+                "hive.security.authorization.enabled" : "true"
+              }
+            },
+            {
+              "webhcat-site" : {
+                "templeton.hive.properties" : "hive.metastore.local=false,hive.metastore.uris=thrift://${clusterHostInfo/hive_metastore_host}:9083,hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/_HOST@${realm}",
+                "templeton.kerberos.secret" : "secret"
+              }
+            },
+            {
+              "core-site" : {
+                "hadoop.proxyuser.HTTP.hosts" : "${clusterHostInfo/webhcat_server_host}"
+              }
+            }
+          ],
+          "identities" : [
+            {
+              "name" : "/spnego"
+            },
+            {
+              "name" : "/smokeuser"
+            }
+          ],
+          "name" : "HIVE"
+        }
+      },
+      "components" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/HCAT",
+          "StackServiceComponents" : {
+            "cardinality" : null,
+            "component_category" : "CLIENT",
+            "component_name" : "HCAT",
+            "custom_commands" : [ ],
+            "display_name" : "HCat Client",
+            "is_client" : true,
+            "is_master" : false,
+            "service_name" : "HIVE",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [ ]
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/HIVE_CLIENT",
+          "StackServiceComponents" : {
+            "cardinality" : "1+",
+            "component_category" : "CLIENT",
+            "component_name" : "HIVE_CLIENT",
+            "custom_commands" : [ ],
+            "display_name" : "Hive Client",
+            "is_client" : true,
+            "is_master" : false,
+            "service_name" : "HIVE",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [ ]
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/HIVE_METASTORE",
+          "StackServiceComponents" : {
+            "cardinality" : "1",
+            "component_category" : "MASTER",
+            "component_name" : "HIVE_METASTORE",
+            "custom_commands" : [ ],
+            "display_name" : "Hive Metastore",
+            "is_client" : false,
+            "is_master" : true,
+            "service_name" : "HIVE",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "auto_deploy" : {
+            "enabled" : true,
+            "location" : "HIVE/HIVE_SERVER"
+          },
+          "dependencies" : [ ]
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/HIVE_SERVER",
+          "StackServiceComponents" : {
+            "cardinality" : "1",
+            "component_category" : "MASTER",
+            "component_name" : "HIVE_SERVER",
+            "custom_commands" : [ ],
+            "display_name" : "HiveServer2",
+            "is_client" : false,
+            "is_master" : true,
+            "service_name" : "HIVE",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/HIVE_SERVER/dependencies/MAPREDUCE2_CLIENT",
+              "Dependencies" : {
+                "component_name" : "MAPREDUCE2_CLIENT",
+                "dependent_component_name" : "HIVE_SERVER",
+                "dependent_service_name" : "HIVE",
+                "scope" : "host",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            },
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/HIVE_SERVER/dependencies/TEZ_CLIENT",
+              "Dependencies" : {
+                "component_name" : "TEZ_CLIENT",
+                "dependent_component_name" : "HIVE_SERVER",
+                "dependent_service_name" : "HIVE",
+                "scope" : "host",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            },
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/HIVE_SERVER/dependencies/YARN_CLIENT",
+              "Dependencies" : {
+                "component_name" : "YARN_CLIENT",
+                "dependent_component_name" : "HIVE_SERVER",
+                "dependent_service_name" : "HIVE",
+                "scope" : "host",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            },
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/HIVE_SERVER/dependencies/ZOOKEEPER_SERVER",
+              "Dependencies" : {
+                "component_name" : "ZOOKEEPER_SERVER",
+                "dependent_component_name" : "HIVE_SERVER",
+                "dependent_service_name" : "HIVE",
+                "scope" : "cluster",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            }
+          ]
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/MYSQL_SERVER",
+          "StackServiceComponents" : {
+            "cardinality" : "0-1",
+            "component_category" : "MASTER",
+            "component_name" : "MYSQL_SERVER",
+            "custom_commands" : [
+              "CLEAN"
+            ],
+            "display_name" : "MySQL Server",
+            "is_client" : false,
+            "is_master" : true,
+            "service_name" : "HIVE",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [ ]
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/WEBHCAT_SERVER",
+          "StackServiceComponents" : {
+            "cardinality" : "1",
+            "component_category" : "MASTER",
+            "component_name" : "WEBHCAT_SERVER",
+            "custom_commands" : [ ],
+            "display_name" : "WebHCat Server",
+            "is_client" : false,
+            "is_master" : true,
+            "service_name" : "HIVE",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/WEBHCAT_SERVER/dependencies/HDFS_CLIENT",
+              "Dependencies" : {
+                "component_name" : "HDFS_CLIENT",
+                "dependent_component_name" : "WEBHCAT_SERVER",
+                "dependent_service_name" : "HIVE",
+                "scope" : "host",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            },
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/WEBHCAT_SERVER/dependencies/MAPREDUCE2_CLIENT",
+              "Dependencies" : {
+                "component_name" : "MAPREDUCE2_CLIENT",
+                "dependent_component_name" : "WEBHCAT_SERVER",
+                "dependent_service_name" : "HIVE",
+                "scope" : "host",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            },
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/WEBHCAT_SERVER/dependencies/PIG",
+              "Dependencies" : {
+                "component_name" : "PIG",
+                "dependent_component_name" : "WEBHCAT_SERVER",
+                "dependent_service_name" : "HIVE",
+                "scope" : "host",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            },
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/WEBHCAT_SERVER/dependencies/YARN_CLIENT",
+              "Dependencies" : {
+                "component_name" : "YARN_CLIENT",
+                "dependent_component_name" : "WEBHCAT_SERVER",
+                "dependent_service_name" : "HIVE",
+                "scope" : "host",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            },
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/WEBHCAT_SERVER/dependencies/ZOOKEEPER_CLIENT",
+              "Dependencies" : {
+                "component_name" : "ZOOKEEPER_CLIENT",
+                "dependent_component_name" : "WEBHCAT_SERVER",
+                "dependent_service_name" : "HIVE",
+                "scope" : "host",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            },
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/components/WEBHCAT_SERVER/dependencies/ZOOKEEPER_SERVER",
+              "Dependencies" : {
+                "component_name" : "ZOOKEEPER_SERVER",
+                "dependent_component_name" : "WEBHCAT_SERVER",
+                "dependent_service_name" : "HIVE",
+                "scope" : "cluster",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            }
+          ]
+        }
+      ],
+      "artifacts" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/artifacts/kerberos_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "kerberos_descriptor",
+            "service_name" : "HIVE",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/HIVE/artifacts/metrics_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "metrics_descriptor",
+            "service_name" : "HIVE",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        }
+      ]
+    },
+    {
+      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/KERBEROS",
+      "StackServices" : {
+        "comments" : "A computer network authentication protocol which works on\n        the basis of 'tickets' to allow nodes communicating over a\n        non-secure network to prove their identity to one another in a\n        secure manner.\n      ",
+        "custom_commands" : [ ],
+        "display_name" : "Kerberos",
+        "required_services" : [ ],
+        "service_check_supported" : true,
+        "service_name" : "KERBEROS",
+        "service_version" : "1.10.3-10",
+        "stack_name" : "HDP",
+        "stack_version" : "2.1",
+        "user_name" : null,
+        "config_types" : {
+          "kerberos-env" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "krb5-conf" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          }
+        },
+        "kerberos_descriptor" : {
+          "components" : [
+            {
+              "name" : "KERBEROS_CLIENT"
+            }
+          ],
+          "identities" : [
+            {
+              "name" : "/smokeuser"
+            }
+          ],
+          "name" : "KERBEROS"
+        }
+      },
+      "components" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/KERBEROS/components/KERBEROS_CLIENT",
+          "StackServiceComponents" : {
+            "cardinality" : "ALL",
+            "component_category" : "CLIENT",
+            "component_name" : "KERBEROS_CLIENT",
+            "custom_commands" : [
+              "SET_KEYTAB",
+              "REMOVE_KEYTAB"
+            ],
+            "display_name" : "Kerberos Client",
+            "is_client" : true,
+            "is_master" : false,
+            "service_name" : "KERBEROS",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "auto_deploy" : {
+            "enabled" : true
+          },
+          "dependencies" : [ ]
+        }
+      ],
+      "artifacts" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/KERBEROS/artifacts/kerberos_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "kerberos_descriptor",
+            "service_name" : "KERBEROS",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/KERBEROS/artifacts/metrics_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "metrics_descriptor",
+            "service_name" : "KERBEROS",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        }
+      ]
+    },
+    {
+      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/MAPREDUCE2",
+      "StackServices" : {
+        "comments" : "Apache Hadoop NextGen MapReduce (YARN)",
+        "custom_commands" : [ ],
+        "display_name" : "MapReduce2",
+        "required_services" : [
+          "YARN"
+        ],
+        "service_check_supported" : true,
+        "service_name" : "MAPREDUCE2",
+        "service_version" : "2.1.0.2.0.6.0",
+        "stack_name" : "HDP",
+        "stack_version" : "2.1",
+        "user_name" : null,
+        "config_types" : {
+          "mapred-env" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "mapred-site" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "true"
+            }
+          }
+        },
+        "kerberos_descriptor" : {
+          "components" : [
+            {
+              "identities" : [
+                {
+                  "principal" : {
+                    "configuration" : "mapred-site/mapreduce.jobhistory.principal",
+                    "type" : "service",
+                    "local_username" : "${mapred-env/mapred_user}",
+                    "value" : "jhs/_HOST@${realm}"
+                  },
+                  "name" : "history_server_jhs",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : "r",
+                      "name" : "${mapred-env/mapred_user}"
+                    },
+                    "file" : "${keytab_dir}/jhs.service.keytab",
+                    "configuration" : "mapred-site/mapreduce.jobhistory.keytab",
+                    "group" : {
+                      "access" : "",
+                      "name" : "${cluster-env/user_group}"
+                    }
+                  }
+                },
+                {
+                  "principal" : {
+                    "configuration" : "mapred-site/mapreduce.jobhistory.webapp.spnego-principal",
+                    "type" : "service",
+                    "local_username" : null,
+                    "value" : null
+                  },
+                  "name" : "/spnego",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : null,
+                      "name" : null
+                    },
+                    "file" : null,
+                    "configuration" : "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file",
+                    "group" : {
+                      "access" : null,
+                      "name" : null
+                    }
+                  }
+                }
+              ],
+              "name" : "HISTORYSERVER"
+            }
+          ],
+          "identities" : [
+            {
+              "name" : "/spnego"
+            },
+            {
+              "name" : "/hdfs"
+            },
+            {
+              "name" : "/smokeuser"
+            }
+          ],
+          "name" : "MAPREDUCE2"
+        }
+      },
+      "components" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/MAPREDUCE2/components/HISTORYSERVER",
+          "StackServiceComponents" : {
+            "cardinality" : "1",
+            "component_category" : "MASTER",
+            "component_name" : "HISTORYSERVER",
+            "custom_commands" : [ ],
+            "display_name" : "History Server",
+            "is_client" : false,
+            "is_master" : true,
+            "service_name" : "MAPREDUCE2",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "auto_deploy" : {
+            "enabled" : true,
+            "location" : "YARN/RESOURCEMANAGER"
+          },
+          "dependencies" : [
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/MAPREDUCE2/components/HISTORYSERVER/dependencies/HDFS_CLIENT",
+              "Dependencies" : {
+                "component_name" : "HDFS_CLIENT",
+                "dependent_component_name" : "HISTORYSERVER",
+                "dependent_service_name" : "MAPREDUCE2",
+                "scope" : "host",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            }
+          ]
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/MAPREDUCE2/components/MAPREDUCE2_CLIENT",
+          "StackServiceComponents" : {
+            "cardinality" : "0+",
+            "component_category" : "CLIENT",
+            "component_name" : "MAPREDUCE2_CLIENT",
+            "custom_commands" : [ ],
+            "display_name" : "MapReduce2 Client",
+            "is_client" : true,
+            "is_master" : false,
+            "service_name" : "MAPREDUCE2",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [ ]
+        }
+      ],
+      "artifacts" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/MAPREDUCE2/artifacts/kerberos_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "kerberos_descriptor",
+            "service_name" : "MAPREDUCE2",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/MAPREDUCE2/artifacts/metrics_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "metrics_descriptor",
+            "service_name" : "MAPREDUCE2",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/MAPREDUCE2/artifacts/widgets_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "widgets_descriptor",
+            "service_name" : "MAPREDUCE2",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        }
+      ]
+    },
+    {
+      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/OOZIE",
+      "StackServices" : {
+        "comments" : "System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the <a target=\"_blank\" href=\"http://www.sencha.com/legal/open-source-faq/\">ExtJS</a> Library.\n      ",
+        "custom_commands" : [ ],
+        "display_name" : "Oozie",
+        "required_services" : [
+          "YARN"
+        ],
+        "service_check_supported" : true,
+        "service_name" : "OOZIE",
+        "service_version" : "4.0.0.2.1",
+        "stack_name" : "HDP",
+        "stack_version" : "2.1",
+        "user_name" : null,
+        "config_types" : {
+          "oozie-env" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "oozie-log4j" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "oozie-site" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "true"
+            }
+          }
+        },
+        "kerberos_descriptor" : {
+          "auth_to_local_properties" : [
+            "oozie-site/oozie.authentication.kerberos.name.rules"
+          ],
+          "components" : [
+            {
+              "identities" : [
+                {
+                  "principal" : {
+                    "configuration" : "oozie-site/oozie.service.HadoopAccessorService.kerberos.principal",
+                    "type" : "service",
+                    "local_username" : "${oozie-env/oozie_user}",
+                    "value" : "oozie/_HOST@${realm}"
+                  },
+                  "name" : "oozie_server",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : "r",
+                      "name" : "${oozie-env/oozie_user}"
+                    },
+                    "file" : "${keytab_dir}/oozie.service.keytab",
+                    "configuration" : "oozie-site/oozie.service.HadoopAccessorService.keytab.file",
+                    "group" : {
+                      "access" : "",
+                      "name" : "${cluster-env/user_group}"
+                    }
+                  }
+                },
+                {
+                  "principal" : {
+                    "configuration" : "oozie-site/oozie.authentication.kerberos.principal",
+                    "type" : "service",
+                    "local_username" : null,
+                    "value" : null
+                  },
+                  "name" : "/spnego",
+                  "keytab" : {
+                    "owner" : {
+                      "access" : null,
+                      "name" : null
+                    },
+                    "file" : null,
+                    "configuration" : "oozie-site/oozie.authentication.kerberos.keytab",
+                    "group" : {
+                      "access" : null,
+                      "name" : null
+                    }
+                  }
+                }
+              ],
+              "name" : "OOZIE_SERVER"
+            }
+          ],
+          "configurations" : [
+            {
+              "oozie-site" : {
+                "oozie.service.HadoopAccessorService.kerberos.enabled" : "true",
+                "oozie.authentication.type" : "kerberos",
+                "oozie.authentication.kerberos.name.rules" : "",
+                "oozie.service.AuthorizationService.authorization.enabled" : "true",
+                "local.realm" : "${realm}"
+              }
+            }
+          ],
+          "identities" : [
+            {
+              "name" : "/spnego"
+            },
+            {
+              "name" : "/smokeuser"
+            },
+            {
+              "name" : "/hdfs"
+            }
+          ],
+          "name" : "OOZIE"
+        }
+      },
+      "components" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/OOZIE/components/OOZIE_CLIENT",
+          "StackServiceComponents" : {
+            "cardinality" : "1+",
+            "component_category" : "CLIENT",
+            "component_name" : "OOZIE_CLIENT",
+            "custom_commands" : [ ],
+            "display_name" : "Oozie Client",
+            "is_client" : true,
+            "is_master" : false,
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/OOZIE/components/OOZIE_CLIENT/dependencies/HDFS_CLIENT",
+              "Dependencies" : {
+                "component_name" : "HDFS_CLIENT",
+                "dependent_component_name" : "OOZIE_CLIENT",
+                "dependent_service_name" : "OOZIE",
+                "scope" : "host",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            },
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/OOZIE/components/OOZIE_CLIENT/dependencies/MAPREDUCE2_CLIENT",
+              "Dependencies" : {
+                "component_name" : "MAPREDUCE2_CLIENT",
+                "dependent_component_name" : "OOZIE_CLIENT",
+                "dependent_service_name" : "OOZIE",
+                "scope" : "host",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            }
+          ]
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/OOZIE/components/OOZIE_SERVER",
+          "StackServiceComponents" : {
+            "cardinality" : "1",
+            "component_category" : "MASTER",
+            "component_name" : "OOZIE_SERVER",
+            "custom_commands" : [ ],
+            "display_name" : "Oozie Server",
+            "is_client" : false,
+            "is_master" : true,
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/OOZIE/components/OOZIE_SERVER/dependencies/HDFS_CLIENT",
+              "Dependencies" : {
+                "component_name" : "HDFS_CLIENT",
+                "dependent_component_name" : "OOZIE_SERVER",
+                "dependent_service_name" : "OOZIE",
+                "scope" : "host",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            },
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/OOZIE/components/OOZIE_SERVER/dependencies/MAPREDUCE2_CLIENT",
+              "Dependencies" : {
+                "component_name" : "MAPREDUCE2_CLIENT",
+                "dependent_component_name" : "OOZIE_SERVER",
+                "dependent_service_name" : "OOZIE",
+                "scope" : "host",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            },
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/OOZIE/components/OOZIE_SERVER/dependencies/YARN_CLIENT",
+              "Dependencies" : {
+                "component_name" : "YARN_CLIENT",
+                "dependent_component_name" : "OOZIE_SERVER",
+                "dependent_service_name" : "OOZIE",
+                "scope" : "host",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            }
+          ]
+        }
+      ],
+      "artifacts" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/OOZIE/artifacts/kerberos_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "kerberos_descriptor",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        },
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/OOZIE/artifacts/metrics_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "metrics_descriptor",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        }
+      ]
+    },
+    {
+      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/PIG",
+      "StackServices" : {
+        "comments" : "Scripting platform for analyzing large datasets",
+        "custom_commands" : [ ],
+        "display_name" : "Pig",
+        "required_services" : [
+          "YARN"
+        ],
+        "service_check_supported" : true,
+        "service_name" : "PIG",
+        "service_version" : "0.12.1.2.1",
+        "stack_name" : "HDP",
+        "stack_version" : "2.1",
+        "user_name" : null,
+        "config_types" : {
+          "pig-env" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "pig-log4j" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          },
+          "pig-properties" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          }
+        }
+      },
+      "components" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/PIG/components/PIG",
+          "StackServiceComponents" : {
+            "cardinality" : "0+",
+            "component_category" : "CLIENT",
+            "component_name" : "PIG",
+            "custom_commands" : [ ],
+            "display_name" : "Pig",
+            "is_client" : true,
+            "is_master" : false,
+            "service_name" : "PIG",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [ ]
+        }
+      ],
+      "artifacts" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/PIG/artifacts/metrics_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "metrics_descriptor",
+            "service_name" : "PIG",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          }
+        }
+      ]
+    },
+    {
+      "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/SQOOP",
+      "StackServices" : {
+        "comments" : "Tool for transferring bulk data between Apache Hadoop and\n        structured data stores such as relational databases\n      ",
+        "custom_commands" : [ ],
+        "display_name" : "Sqoop",
+        "required_services" : [
+          "HDFS"
+        ],
+        "service_check_supported" : true,
+        "service_name" : "SQOOP",
+        "service_version" : "1.4.4.2.1",
+        "stack_name" : "HDP",
+        "stack_version" : "2.1",
+        "user_name" : null,
+        "config_types" : {
+          "sqoop-env" : {
+            "supports" : {
+              "adding_forbidden" : "false",
+              "do_not_extend" : "false",
+              "final" : "false"
+            }
+          }
+        }
+      },
+      "components" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/SQOOP/components/SQOOP",
+          "StackServiceComponents" : {
+            "cardinality" : "1+",
+            "component_category" : "CLIENT",
+            "component_name" : "SQOOP",
+            "custom_commands" : [ ],
+            "display_name" : "Sqoop",
+            "is_client" : true,
+            "is_master" : false,
+            "service_name" : "SQOOP",
+            "stack_name" : "HDP",
+            "stack_version" : "2.1"
+          },
+          "dependencies" : [
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/SQOOP/components/SQOOP/dependencies/HDFS_CLIENT",
+              "Dependencies" : {
+                "component_name" : "HDFS_CLIENT",
+                "dependent_component_name" : "SQOOP",
+                "dependent_service_name" : "SQOOP",
+                "scope" : "host",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            },
+            {
+              "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/SQOOP/components/SQOOP/dependencies/MAPREDUCE2_CLIENT",
+              "Dependencies" : {
+                "component_name" : "MAPREDUCE2_CLIENT",
+                "dependent_component_name" : "SQOOP",
+                "dependent_service_name" : "SQOOP",
+                "scope" : "host",
+                "stack_name" : "HDP",
+                "stack_version" : "2.1"
+              }
+            }
+          ]
+        }
+      ],
+      "artifacts" : [
+        {
+          "href" : "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services/SQOOP/artifacts/metrics_descriptor",
+          "Artifacts" : {
+            "artifact_name" : "metrics_descriptor",
+            "servic

<TRUNCATED>

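The JSON excerpted above is a captured response from Ambari's stack-definition REST endpoint; the embedded "href" values give the URL pattern. As a minimal sketch of how such a document can be retrieved (the host, port, and admin/admin credentials are assumptions for a throwaway test cluster, not anything this archive prescribes):

    # Sketch only: pull the stack-services document and list each service.
    # The URL mirrors the hrefs above; credentials are assumed defaults.
    import base64, json, urllib.request

    url = "http://c6401.ambari.apache.org:8080/api/v1/stacks/HDP/versions/2.1/services"
    req = urllib.request.Request(url)
    req.add_header("Authorization",
                   "Basic " + base64.b64encode(b"admin:admin").decode("ascii"))

    with urllib.request.urlopen(req) as resp:
        doc = json.load(resp)

    # Each item carries a StackServices block like the ones shown above.
    for item in doc.get("items", []):
        svc = item["StackServices"]
        print(svc["service_name"], svc["service_version"])
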
[25/50] [abbrv] ambari git commit: AMBARI-19337. Ambari has some spelling mistakes in YARN proxyuser properties in many places (Jay SenSharma via smohanty)

Posted by nc...@apache.org.
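A note on why this class of fix matters: Hadoop reads proxyuser settings by exact key name and ignores keys it does not recognize, so a misspelled hadoop.proxyuser.* property simply never takes effect. A minimal sketch of the kind of sanity check that would catch such typos (a hypothetical helper, not part of this patch; the suffix set is assumed from common proxyuser keys such as hadoop.proxyuser.HTTP.groups in the core-site below):

    # Sketch only: flag hadoop.proxyuser.* keys that do not end in one of
    # the suffixes Hadoop honors, since misspelled keys never take effect.
    import re

    VALID = re.compile(r"^hadoop\.proxyuser\.[^.]+\.(hosts|groups|users)$")

    def misspelled_proxyuser_keys(core_site):
        return [k for k in core_site
                if k.startswith("hadoop.proxyuser.") and not VALID.match(k)]

    core_site = {
        "hadoop.proxyuser.HTTP.groups": "users",   # well-formed
        "hadoop.proxyuser.HTTP.kroups": "users",   # hypothetical typo
    }
    print(misspelled_proxyuser_keys(core_site))    # ['hadoop.proxyuser.HTTP.kroups']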
http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json.orig
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json.orig b/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json.orig
new file mode 100644
index 0000000..f14eb52
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.2/configs/pig-service-check-secure.json.orig
@@ -0,0 +1,651 @@
+{
+    "configuration_attributes": {
+        "mapred-site": {}, 
+        "pig-env": {}, 
+        "ranger-hdfs-plugin-properties": {}, 
+        "kerberos-env": {}, 
+        "tez-site": {}, 
+        "hdfs-site": {}, 
+        "tez-env": {}, 
+        "yarn-log4j": {}, 
+        "hadoop-policy": {}, 
+        "hdfs-log4j": {}, 
+        "mapred-env": {}, 
+        "krb5-conf": {}, 
+        "pig-properties": {}, 
+        "core-site": {}, 
+        "yarn-env": {}, 
+        "hadoop-env": {}, 
+        "zookeeper-log4j": {}, 
+        "yarn-site": {}, 
+        "capacity-scheduler": {}, 
+        "zoo.cfg": {}, 
+        "zookeeper-env": {}, 
+        "pig-log4j": {}, 
+        "cluster-env": {}
+    }, 
+    "commandParams": {
+        "command_timeout": "300", 
+        "script": "scripts/service_check.py", 
+        "script_type": "PYTHON", 
+        "service_package_folder": "common-services/PIG/0.12.0.2.0/package", 
+        "hooks_folder": "HDP/2.0.6/hooks"
+    }, 
+    "roleCommand": "SERVICE_CHECK", 
+    "kerberosCommandParams": [], 
+    "clusterName": "c1", 
+    "hostname": "c6403.ambari.apache.org", 
+    "hostLevelParams": {
+        "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+        "ambari_db_rca_password": "mapred", 
+        "java_home": "/usr/jdk64/jdk1.7.0_67",
+        "java_version": "8",
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
+        "jce_name": "UnlimitedJCEPolicyJDK7.zip", 
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
+        "stack_version": "2.2", 
+        "stack_name": "HDP", 
+        "db_name": "ambari", 
+        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "jdk_name": "jdk-7u67-linux-x64.tar.gz", 
+        "ambari_db_rca_username": "mapred", 
+        "db_driver_filename": "mysql-connector-java.jar", 
+        "agentCacheDir": "/var/lib/ambari-agent/cache", 
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar"
+    }, 
+    "commandType": "EXECUTION_COMMAND", 
+    "roleParams": {}, 
+    "serviceName": "PIG", 
+    "role": "PIG_SERVICE_CHECK", 
+    "forceRefreshConfigTags": [], 
+    "taskId": 180, 
+    "public_hostname": "c6403.ambari.apache.org", 
+    "configurations": {
+        "mapred-site": {
+            "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020", 
+            "mapreduce.jobhistory.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab", 
+            "mapreduce.reduce.input.buffer.percent": "0.0", 
+            "mapreduce.output.fileoutputformat.compress": "false", 
+            "mapreduce.framework.name": "yarn", 
+            "mapreduce.map.speculative": "false", 
+            "mapreduce.reduce.shuffle.merge.percent": "0.66", 
+            "yarn.app.mapreduce.am.resource.mb": "682", 
+            "mapreduce.map.java.opts": "-Xmx546m", 
+            "mapreduce.cluster.administrators": " hadoop", 
+            "mapreduce.application.classpath": "$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure", 
+            "mapreduce.job.reduce.slowstart.completedmaps": "0.05", 
+            "mapreduce.application.framework.path": "/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework", 
+            "mapreduce.output.fileoutputformat.compress.type": "BLOCK", 
+            "mapreduce.reduce.speculative": "false", 
+            "mapreduce.reduce.java.opts": "-Xmx546m", 
+            "mapreduce.am.max-attempts": "2", 
+            "yarn.app.mapreduce.am.admin-command-opts": "-Dhdp.version=${hdp.version}", 
+            "mapreduce.reduce.log.level": "INFO", 
+            "mapreduce.map.sort.spill.percent": "0.7", 
+            "mapreduce.job.emit-timeline-data": "false", 
+            "mapreduce.task.io.sort.mb": "273", 
+            "mapreduce.task.timeout": "300000", 
+            "mapreduce.map.memory.mb": "682", 
+            "mapreduce.task.io.sort.factor": "100", 
+            "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp", 
+            "mapreduce.reduce.memory.mb": "682", 
+            "mapreduce.jobhistory.principal": "jhs/_HOST@EXAMPLE.COM", 
+            "yarn.app.mapreduce.am.log.level": "INFO", 
+            "mapreduce.map.log.level": "INFO", 
+            "mapreduce.shuffle.port": "13562", 
+            "mapreduce.reduce.shuffle.fetch.retry.timeout-ms": "30000", 
+            "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64", 
+            "mapreduce.map.output.compress": "false", 
+            "yarn.app.mapreduce.am.staging-dir": "/user", 
+            "mapreduce.reduce.shuffle.parallelcopies": "30", 
+            "mapreduce.reduce.shuffle.input.buffer.percent": "0.7", 
+            "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888", 
+            "mapreduce.jobhistory.keytab": "/etc/security/keytabs/jhs.service.keytab", 
+            "mapreduce.jobhistory.done-dir": "/mr-history/done", 
+            "mapreduce.admin.reduce.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", 
+            "mapreduce.reduce.shuffle.fetch.retry.enabled": "1", 
+            "mapreduce.jobhistory.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "yarn.app.mapreduce.am.command-opts": "-Xmx546m -Dhdp.version=${hdp.version}", 
+            "mapreduce.reduce.shuffle.fetch.retry.interval-ms": "1000", 
+            "mapreduce.jobhistory.bind-host": "0.0.0.0", 
+            "mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}"
+        }, 
+        "pig-env": {
+            "content": "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  PIG_OPTS=\"$PIG_OPTS -Dmapreduce.framework.name=yarn\"\nfi"
+        }, 
+        "ranger-hdfs-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "hadoop.rpc.protection": "-", 
+            "ranger-hdfs-plugin-enabled": "No", 
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "policy_user": "ambari-qa", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop"
+        }, 
+        "kerberos-env": {
+            "ad_create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}\n    ",
+            "realm": "EXAMPLE.COM", 
+            "container_dn": "", 
+            "ldap_url": "", 
+            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5", 
+            "kdc_type": "mit-kdc",
+            "kdc_hosts": "c6401.ambari.apache.org",
+            "admin_server_host": "c6401.ambari.apache.org"
+        },
+        "tez-site": {
+            "tez.task.get-task.sleep.interval-ms.max": "200", 
+            "tez.task.max-events-per-heartbeat": "500", 
+            "tez.task.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC", 
+            "tez.runtime.compress": "true", 
+            "tez.runtime.io.sort.mb": "272", 
+            "tez.runtime.convert.user-payload.to.history-text": "false", 
+            "tez.generate.debug.artifacts": "false", 
+            "tez.am.tez-ui.history-url.template": "__HISTORY_URL_BASE__?viewPath=%2F%23%2Ftez-app%2F__APPLICATION_ID__", 
+            "tez.am.log.level": "INFO", 
+            "tez.counters.max.groups": "1000", 
+            "tez.runtime.unordered.output.buffer.size-mb": "51", 
+            "tez.shuffle-vertex-manager.max-src-fraction": "0.4", 
+            "tez.counters.max": "2000", 
+            "tez.task.resource.memory.mb": "682", 
+            "tez.history.logging.service.class": "org.apache.tez.dag.history.logging.ats.ATSHistoryLoggingService", 
+            "tez.lib.uris": "/hdp/apps/${hdp.version}/tez/tez.tar.gz", 
+            "tez.task.am.heartbeat.counter.interval-ms.max": "4000", 
+            "tez.am.max.app.attempts": "2", 
+            "tez.am.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64", 
+            "tez.am.container.idle.release-timeout-max.millis": "20000", 
+            "tez.use.cluster.hadoop-libs": "false", 
+            "tez.am.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", 
+            "tez.am.container.idle.release-timeout-min.millis": "10000", 
+            "tez.runtime.compress.codec": "org.apache.hadoop.io.compress.SnappyCodec", 
+            "tez.task.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", 
+            "tez.task.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64", 
+            "tez.am.container.reuse.enabled": "true", 
+            "tez.session.am.dag.submit.timeout.secs": "300", 
+            "tez.grouping.min-size": "16777216", 
+            "tez.grouping.max-size": "1073741824", 
+            "tez.session.client.timeout.secs": "-1", 
+            "tez.cluster.additional.classpath.prefix": "/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure", 
+            "tez.am.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC", 
+            "tez.staging-dir": "/tmp/${user.name}/staging", 
+            "tez.am.am-rm.heartbeat.interval-ms.max": "250", 
+            "tez.am.maxtaskfailures.per.node": "10", 
+            "tez.am.container.reuse.non-local-fallback.enabled": "false", 
+            "tez.am.container.reuse.locality.delay-allocation-millis": "250", 
+            "tez.am.container.reuse.rack-fallback.enabled": "true", 
+            "tez.grouping.split-waves": "1.7", 
+            "tez.shuffle-vertex-manager.min-src-fraction": "0.2", 
+            "tez.am.resource.memory.mb": "1364"
+        }, 
+        "hdfs-site": {
+            "dfs.namenode.avoid.write.stale.datanode": "true", 
+            "dfs.permissions.superusergroup": "hdfs", 
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
+            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "dfs.namenode.checkpoint.txns": "1000000", 
+            "dfs.block.access.token.enable": "true", 
+            "dfs.support.append": "true", 
+            "dfs.datanode.address": "0.0.0.0:1019", 
+            "dfs.cluster.administrators": " hdfs", 
+            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
+            "dfs.namenode.safemode.threshold-pct": "1.0f", 
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
+            "dfs.permissions.enabled": "true", 
+            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
+            "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary", 
+            "dfs.https.port": "50470", 
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
+            "dfs.blockreport.initialDelay": "120", 
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
+            "dfs.blocksize": "134217728", 
+            "dfs.client.read.shortcircuit": "true", 
+            "dfs.datanode.max.transfer.threads": "4096", 
+            "dfs.heartbeat.interval": "3", 
+            "dfs.replication": "3", 
+            "dfs.namenode.handler.count": "100", 
+            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "fs.permissions.umask-mode": "022", 
+            "dfs.namenode.stale.datanode.interval": "30000", 
+            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
+            "dfs.namenode.name.dir": "/hadoop/hdfs/namenode", 
+            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "dfs.datanode.data.dir": "/hadoop/hdfs/data", 
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
+            "dfs.webhdfs.enabled": "true", 
+            "dfs.datanode.failed.volumes.tolerated": "0", 
+            "dfs.namenode.accesstime.precision": "0", 
+            "dfs.datanode.https.address": "0.0.0.0:50475", 
+            "dfs.namenode.avoid.read.stale.datanode": "true", 
+            "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090", 
+            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM", 
+            "dfs.datanode.http.address": "0.0.0.0:1022", 
+            "dfs.datanode.du.reserved": "1073741824", 
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
+            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
+            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "dfs.http.policy": "HTTP_ONLY", 
+            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
+            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", 
+            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
+            "dfs.journalnode.https-address": "0.0.0.0:8481", 
+            "dfs.journalnode.http-address": "0.0.0.0:8480", 
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
+            "dfs.datanode.data.dir.perm": "750", 
+            "dfs.namenode.name.dir.restore": "true", 
+            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600"
+        }, 
+        "tez-env": {
+            "content": "\n# Tez specific configuration\nexport TEZ_CONF_DIR={{config_dir}}\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}", 
+            "tez_user": "tez"
+        }, 
+        "yarn-log4j": {
+            "content": "\n#Relative to Yarn Log Dir Prefix\nyarn.log.dir=.\n#\n# Job Summary Appender\n#\n# Use following logger to send summary to separate file defined by\n# hadoop.mapreduce.jobsummary.log.file rolled daily:\n# hadoop.mapreduce.jobsummary.logger=INFO,JSA\n#\nhadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}\nhadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log\nlog4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender\n# Set the ResourceManager summary log filename\nyarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log\n# Set the ResourceManager summary log level and appender\nyarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}\n#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\n\n# To enable AppSummaryLogging for the RM,\n# set yarn.server.resourcemanager.appsummary.logger to\n# LEVEL,RMSUMMARY in hadoop-env.sh\n\n# Appender for ResourceManager Application Summary Log\n# Requires the following properties to be set\n#    - hadoop.log.dir (Hadoop Log directory)\n#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)\n#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)\nlog4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender\nlog4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}\nlog4j.appender.RMSUMMARY.MaxFileSize=256MB\nlog4j.appender.RMSUMMARY.MaxBackupIndex=20\nlog4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.JSA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\nlog4j.appender.JSA.DatePattern=.yyyy-MM-dd\nlog4j.appender.JSA.layout=org.apache.log4j.PatternLayout\nlog4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}\nlog4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false"
+        }, 
+        "hadoop-policy": {
+            "security.job.client.protocol.acl": "*", 
+            "security.job.task.protocol.acl": "*", 
+            "security.datanode.protocol.acl": "*", 
+            "security.namenode.protocol.acl": "*", 
+            "security.client.datanode.protocol.acl": "*", 
+            "security.inter.tracker.protocol.acl": "*", 
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
+            "security.client.protocol.acl": "*", 
+            "security.refresh.policy.protocol.acl": "hadoop", 
+            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.inter.datanode.protocol.acl": "*"
+        }, 
+        "hdfs-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateChange=WARN"
+        }, 
+        "mapred-env": {
+            "content": "\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n#export HADOOP_JOB_HISTORYSERVER_OPTS=\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.\n#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"", 
+            "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce", 
+            "mapred_user": "mapred", 
+            "jobhistory_heapsize": "900", 
+            "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
+        }, 
+        "krb5-conf": {
+            "conf_dir": "/etc",
+            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n{% if domains %}\n[domain_realm]\n{%- for domain in domains.split(',') %}\n  {{domain|trim()}} = {{realm}}\n{%- endfor %}\n{% endif %}\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n{%- if kdc_hosts > 0 -%}\n{%- set kdc_host_list = kdc_hosts.split(',')  -%}\n{%- if kdc_host_list and kdc_host_list|length > 0 %}\n    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}\n{%- if kdc_host_list -%}\n{% for kdc_host in kdc_host_list %}\n    kdc = {{kdc_host|trim()}}\n{%- endfor -%}\n{% endif %}\n{%- endif %}\n{%- endif %}\n  }\n\n{# Append additional realm declarations below #}\n    ",
+            "domains": "",
+            "manage_krb5_conf": "true"
+        },
+        "pig-properties": {
+            "content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.\n# see bin/pig -help\n\n# brief logging (no timestamps)\nbrief=false\n\n# debug level, INFO is default\ndebug=INFO\n\n# verbose print all log messages to screen (default to print only INFO and above to screen)\nverbose=false\n\n# exectype local|mapreduce, mapreduce is default\nexectype=mapreduce\n\n# Enable insertion of information about script into hadoop job conf \npig.script.info.enabled=true\n\n# Do not spill temp files smaller than this size (bytes)\npig.spill.size.threshold=5000000\n\n# EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)\n# This should help reduce the number of files being spilled.\npig.spill.gc.activation.size=40000000\n\n# the following two parameters are to help estimate the reducer number\npig.exec.reducers.bytes.per.reducer=1000000000\npig.exec.reducers.max=999\n\n# Temporary location to store the intermediate data.\npig.temp.dir=/tmp/\n\n# Threshold for merging FRJoin fragment files\npig.files.concatenation.threshold=100\npig.optimistic.files.concatenation=false;\n\npig.disable.counter=false\n\n# Avoid pig failures when multiple jobs write to the same location\npig.location.check.strict=false\n\nhcat.bin=/usr/bin/hcat"
+        }, 
+        "core-site": {
+            "hadoop.http.authentication.signature.secret": "", 
+            "proxyuser_group": "users", 
+            "hadoop.http.authentication.cookie.domain": "", 
+            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
+            "fs.trash.interval": "360", 
+            "hadoop.http.authentication.signer.secret.provider.object": "", 
+            "hadoop.http.authentication.token.validity": "", 
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec", 
+            "ipc.client.idlethreshold": "8000", 
+            "hadoop.http.authentication.cookie.path": "", 
+            "hadoop.http.authentication.signer.secret.provider": "", 
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
+            "hadoop.rpc.protection": "authentication", 
+            "io.file.buffer.size": "131072", 
+            "hadoop.security.authentication": "kerberos", 
+            "hadoop.http.filter.initializers": "", 
+            "mapreduce.jobtracker.webinterface.trusted": "false", 
+            "hadoop.http.authentication.kerberos.name.rules": "", 
+            "hadoop.proxyuser.HTTP.groups": "users", 
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
+            "hadoop.http.authentication.signature.secret.file": "", 
+            "hadoop.http.authentication.type": "simple", 
+            "hadoop.security.authorization": "true", 
+            "ipc.server.tcpnodelay": "true", 
+            "ipc.client.connect.max.retries": "50", 
+            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hdfs@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](jhs@EXAMPLE.COM)s/.*/mapred/\nRULE:[2:$1@$0](jn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nm@EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](rm@EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](yarn@EXAMPLE.COM)s/.*/yarn/\nDEFAULT", 
+            "ipc.client.connection.maxidletime": "30000"
+        }, 
+        "yarn-env": {
+            "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 
+            "apptimelineserver_heapsize": "1024", 
+            "nodemanager_heapsize": "1024", 
+            "content": "\nexport HADOOP_YARN_HOME={{hadoop_yarn_home}}\nexport YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\nexport YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\nexport JAVA_HOME={{java64_home}}\n\n# User for YARN daemons\nexport HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n# resolve links - $0 may be a softlink\nexport YARN_CONF_DIR=\"${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}\"\n\n# some Java parameters\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\nif [ \"$JAVA_HOME\" != \"\" ]; then\n  #echo \"run java in $JAVA_HOME\"\n  JAVA_HOME=$JAVA_HOME\nfi\n\nif [ \"$JAVA_HOME\" = \"\" ]; then\n  echo \"Error: JAVA_HOME is not set.\"\n  exit 1\nfi\n\nJAVA=$JAVA_HOME/bin/java\nJAVA_HEAP_MAX=-Xmx1000m\n\n# For setting YARN specific HEAP sizes please use this\n# Parameter and set appropriately\nYARN_HEAPSIZE={{yarn_heapsize}}\n\n# check envvars which might override default args\nif [ \"$YARN_HEAPSIZE\" != \"\" ]; then\n  JAVA_HEAP_MAX=\"-Xmx\"\"$YARN_HEAPSIZE\"\"m\"\nfi\n\n# Resource Manager specific parameters\n\n# Specify the max Heapsize for the ResourceManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_RESOURCEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n# Specify the JVM options to be used when starting the ResourceManager.\n# These options will be appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_RESOURCEMANAGER_OPTS=\n\n# Node Manager specific parameters\n\n# Specify the max Heapsize for the NodeManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_NODEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n# Specify the max Heapsize for the HistoryManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1024.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_HISTORYSERVER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n\n# Specify the JVM options to be used when starting the NodeManager.\n# These options will be appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_NODEMANAGER_OPTS=\n\n# so that filenames w/ spaces are handled correctly in loops below\nIFS=\n\n\n# default log directory and file\nif [ \"$YARN_LOG_DIR\" = \"\" ]; then\n  YARN_LOG_DIR=\"$HADOOP_YARN_HOME/logs\"\nfi\nif [ \"$YARN_LOGFILE\" = \"\" ]; then\n  YARN_LOGFILE='yarn.log'\nfi\n\n# default policy file for service-level authorization\nif [ \"$YARN_POLICYFILE\" = \"\" ]; then\n  YARN_POLICYFILE=\"hadoop-policy.xml\"\nfi\n\n# restore ordinary behaviour\nunset IFS\n\n\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\nif [ \"x$JAVA_LIBRARY_PATH\" != \"x\" ]; then\n  YARN_OPTS=\"$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH\"\nfi\nYARN_OPTS=\"$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE\"", 
+            "yarn_heapsize": "1024", 
+            "min_user_id": "500", 
+            "yarn_user": "yarn", 
+            "resourcemanager_heapsize": "1024", 
+            "yarn_log_dir_prefix": "/var/log/hadoop-yarn"
+        }, 
+        "hadoop-env": {
+            "dtnode_heapsize": "1024m", 
+            "namenode_opt_maxnewsize": "256m", 
+            "hdfs_log_dir_prefix": "/var/log/hadoop", 
+            "namenode_heapsize": "1024m", 
+            "proxyuser_group": "users", 
+            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", 
+            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
+            "hdfs_user": "hdfs", 
+            "namenode_opt_newsize": "256m", 
+            "hadoop_root_logger": "INFO,RFA", 
+            "hadoop_heapsize": "1024", 
+            "namenode_opt_maxpermsize": "256m", 
+            "namenode_opt_permsize": "128m", 
+            "hdfs_principal_name": "hdfs@EXAMPLE.COM"
+        }, 
+        "zookeeper-log4j": {
+            "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
+        }, 
+        "yarn-site": {
+            "yarn.timeline-service.http-authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "yarn.resourcemanager.webapp.address": "c6402.ambari.apache.org:8088", 
+            "yarn.resourcemanager.zk-num-retries": "1000", 
+            "yarn.timeline-service.http-authentication.signature.secret.file": "", 
+            "yarn.timeline-service.bind-host": "0.0.0.0", 
+            "yarn.resourcemanager.ha.enabled": "false", 
+            "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "hadoop-yarn", 
+            "yarn.timeline-service.http-authentication.signature.secret": "", 
+            "yarn.timeline-service.webapp.address": "c6402.ambari.apache.org:8188", 
+            "yarn.resourcemanager.state-store.max-completed-applications": "${yarn.resourcemanager.max-completed-applications}", 
+            "yarn.timeline-service.enabled": "true", 
+            "yarn.nodemanager.recovery.enabled": "true", 
+            "yarn.timeline-service.principal": "yarn/_HOST@EXAMPLE.COM", 
+            "yarn.nodemanager.keytab": "/etc/security/keytabs/nm.service.keytab", 
+            "yarn.timeline-service.address": "c6402.ambari.apache.org:10200", 
+            "yarn.resourcemanager.hostname": "c6402.ambari.apache.org", 
+            "yarn.resourcemanager.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "yarn.resourcemanager.am.max-attempts": "2", 
+            "yarn.nodemanager.log-aggregation.debug-enabled": "false", 
+            "yarn.resourcemanager.system-metrics-publisher.enabled": "true", 
+            "yarn.nodemanager.vmem-pmem-ratio": "2.1", 
+            "yarn.nodemanager.bind-host": "0.0.0.0", 
+            "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude", 
+            "yarn.nodemanager.linux-container-executor.cgroups.mount": "false", 
+            "yarn.timeline-service.http-authentication.cookie.path": "", 
+            "yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size": "10", 
+            "yarn.log.server.url": "http://c6402.ambari.apache.org:19888/jobhistory/logs", 
+            "yarn.nodemanager.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "yarn.timeline-service.keytab": "/etc/security/keytabs/yarn.service.keytab", 
+            "yarn.application.classpath": "$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*", 
+            "yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled": "false", 
+            "yarn.resourcemanager.keytab": "/etc/security/keytabs/rm.service.keytab", 
+            "yarn.resourcemanager.principal": "rm/_HOST@EXAMPLE.COM", 
+            "yarn.nodemanager.local-dirs": "/hadoop/yarn/local", 
+            "yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage": "false", 
+            "yarn.nodemanager.remote-app-log-dir-suffix": "logs", 
+            "yarn.resourcemanager.connect.max-wait.ms": "900000", 
+            "yarn.resourcemanager.address": "c6402.ambari.apache.org:8050", 
+            "yarn.timeline-service.http-authentication.token.validity": "", 
+            "yarn.resourcemanager.proxy-user-privileges.enabled": "true", 
+            "yarn.scheduler.maximum-allocation-mb": "2048", 
+            "yarn.nodemanager.container-monitor.interval-ms": "3000", 
+            "yarn.node-labels.fs-store.retry-policy-spec": "2000, 500", 
+            "yarn.resourcemanager.zk-acl": "world:anyone:rwcda", 
+            "yarn.resourcemanager.webapp.https.address": "c6402.ambari.apache.org:8090", 
+            "yarn.log-aggregation-enable": "true", 
+            "yarn.nodemanager.delete.debug-delay-sec": "0", 
+            "yarn.timeline-service.store-class": "org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore", 
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "", 
+            "yarn.timeline-service.client.retry-interval-ms": "1000", 
+            "hadoop.registry.zk.quorum": "c6402.ambari.apache.org:2181,c6403.ambari.apache.org:2181,c6401.ambari.apache.org:2181", 
+            "yarn.nodemanager.aux-services": "mapreduce_shuffle", 
+            "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler", 
+            "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage": "90", 
+            "yarn.resourcemanager.zk-timeout-ms": "10000", 
+            "yarn.resourcemanager.fs.state-store.uri": " ", 
+            "yarn.nodemanager.linux-container-executor.group": "hadoop", 
+            "yarn.nodemanager.remote-app-log-dir": "/app-logs", 
+            "yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms": "10000", 
+            "yarn.timeline-service.generic-application-history.store-class": "org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore", 
+            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "", 
+            "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", 
+            "yarn.nodemanager.principal": "nm/_HOST@EXAMPLE.COM", 
+            "yarn.resourcemanager.work-preserving-recovery.enabled": "true", 
+            "yarn.resourcemanager.resource-tracker.address": "c6402.ambari.apache.org:8025", 
+            "yarn.nodemanager.health-checker.script.timeout-ms": "60000", 
+            "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler", 
+            "yarn.nodemanager.resource.memory-mb": "2048", 
+            "yarn.timeline-service.http-authentication.kerberos.name.rules": "", 
+            "yarn.nodemanager.resource.cpu-vcores": "1", 
+            "yarn.resourcemanager.proxyusers.*.users": "", 
+            "yarn.timeline-service.ttl-ms": "2678400000", 
+            "yarn.nodemanager.resource.percentage-physical-cpu-limit": "100", 
+            "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000", 
+            "yarn.resourcemanager.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab", 
+            "yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds": "-1", 
+            "yarn.nodemanager.log.retain-seconds": "604800",
+            "yarn.timeline-service.http-authentication.type": "kerberos", 
+            "yarn.nodemanager.log-dirs": "/hadoop/yarn/log", 
+            "yarn.resourcemanager.proxyusers.*.groups": "", 
+            "yarn.timeline-service.client.max-retries": "30", 
+            "yarn.nodemanager.health-checker.interval-ms": "135000", 
+            "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX", 
+            "yarn.nodemanager.vmem-check-enabled": "false", 
+            "yarn.acl.enable": "true", 
+            "yarn.node-labels.manager-class": "org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager", 
+            "yarn.timeline-service.leveldb-timeline-store.read-cache-size": "104857600", 
+            "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler", 
+            "yarn.client.nodemanager-connect.max-wait-ms": "60000", 
+            "yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true", 
+            "yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000", 
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "", 
+            "yarn.timeline-service.http-authentication.signer.secret.provider": "", 
+            "yarn.resourcemanager.bind-host": "0.0.0.0", 
+            "yarn.http.policy": "HTTP_ONLY", 
+            "yarn.resourcemanager.zk-address": "c6402.ambari.apache.org:2181", 
+            "yarn.nodemanager.recovery.dir": "{{yarn_log_dir_prefix}}/nodemanager/recovery-state", 
+            "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor", 
+            "yarn.resourcemanager.store.class": "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore", 
+            "yarn.nodemanager.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab", 
+            "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline", 
+            "yarn.scheduler.minimum-allocation-mb": "682", 
+            "yarn.timeline-service.ttl-enable": "true", 
+            "yarn.resourcemanager.scheduler.address": "c6402.ambari.apache.org:8030", 
+            "yarn.log-aggregation.retain-seconds": "2592000", 
+            "yarn.nodemanager.address": "0.0.0.0:45454", 
+            "hadoop.registry.rm.enabled": "false", 
+            "yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms": "300000", 
+            "yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500", 
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "", 
+            "yarn.nodemanager.log-aggregation.compression-type": "gz", 
+            "yarn.timeline-service.http-authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "yarn.nodemanager.log-aggregation.num-log-files-per-app": "30", 
+            "yarn.resourcemanager.recovery.enabled": "true", 
+            "yarn.timeline-service.http-authentication.cookie.domain": "", 
+            "yarn.resourcemanager.zk-retry-interval-ms": "1000", 
+            "yarn.admin.acl": "",
+            "yarn.node-labels.fs-store.root-dir": "/system/yarn/node-labels", 
+            "yarn.client.nodemanager-connect.retry-interval-ms": "10000", 
+            "yarn.resourcemanager.admin.address": "c6402.ambari.apache.org:8141", 
+            "yarn.timeline-service.webapp.https.address": "c6402.ambari.apache.org:8190", 
+            "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore", 
+            "yarn.resourcemanager.connect.retry-interval.ms": "30000", 
+            "yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000", 
+            "yarn.resourcemanager.proxyusers.*.hosts": ""
+        }, 
+        "capacity-scheduler": {
+            "yarn.scheduler.capacity.default.minimum-user-limit-percent": "100", 
+            "yarn.scheduler.capacity.root.default.maximum-capacity": "100", 
+            "yarn.scheduler.capacity.root.default.user-limit-factor": "1", 
+            "yarn.scheduler.capacity.root.accessible-node-labels": "*", 
+            "yarn.scheduler.capacity.root.default.state": "RUNNING", 
+            "yarn.scheduler.capacity.root.capacity": "100", 
+            "yarn.scheduler.capacity.root.default.capacity": "100", 
+            "yarn.scheduler.capacity.root.queues": "default", 
+            "yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": "-1", 
+            "yarn.scheduler.capacity.root.default-node-label-expression": " ", 
+            "yarn.scheduler.capacity.node-locality-delay": "40", 
+            "yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": "-1", 
+            "yarn.scheduler.capacity.root.default.acl_submit_applications": "*", 
+            "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2", 
+            "yarn.scheduler.capacity.root.acl_administer_queue": "*", 
+            "yarn.scheduler.capacity.maximum-applications": "10000", 
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*", 
+            "yarn.scheduler.capacity.resource-calculator": "org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator"
+        }, 
+        "zoo.cfg": {
+            "clientPort": "2181", 
+            "autopurge.purgeInterval": "24", 
+            "syncLimit": "5", 
+            "dataDir": "/hadoop/zookeeper", 
+            "initLimit": "10", 
+            "tickTime": "2000", 
+            "autopurge.snapRetainCount": "30"
+        }, 
+        "zookeeper-env": {
+            "zk_user": "zookeeper", 
+            "zookeeper_keytab_path": "/etc/security/keytabs/zk.service.keytab", 
+            "zk_log_dir": "/var/log/zookeeper", 
+            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
+            "zk_pid_dir": "/var/run/zookeeper", 
+            "zookeeper_principal_name": "zookeeper/_HOST@EXAMPLE.COM"
+        }, 
+        "pig-log4j": {
+            "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n# ***** Set root logger level to DEBUG and its only appender to A.\nlog4j.logger.org.apache.pig=info, A\n\n# ***** A is set to be a ConsoleAppender.\
 nlog4j.appender.A=org.apache.log4j.ConsoleAppender\n# ***** A uses PatternLayout.\nlog4j.appender.A.layout=org.apache.log4j.PatternLayout\nlog4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n"
+        }, 
+        "cluster-env": {
+            "managed_hdfs_resource_property_names": "",
+            "security_enabled": "true",
+            "ignore_groupsusers_create": "false",
+            "kerberos_domain": "EXAMPLE.COM",
+            "user_group": "hadoop",
+            "smokeuser_principal_name": "ambari-qa@EXAMPLE.COM",
+            "smokeuser": "ambari-qa",
+            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab"
+        }
+    }, 
+    "configurationTags": {
+        "mapred-site": {
+            "tag": "version1425150589654"
+        }, 
+        "pig-env": {
+            "tag": "version1425325831978"
+        }, 
+        "ranger-hdfs-plugin-properties": {
+            "tag": "version1"
+        }, 
+        "kerberos-env": {
+            "tag": "version1425149782373"
+        }, 
+        "tez-site": {
+            "tag": "version1"
+        }, 
+        "hdfs-site": {
+            "tag": "version1425150589741"
+        }, 
+        "tez-env": {
+            "tag": "version1"
+        }, 
+        "yarn-log4j": {
+            "tag": "version1"
+        }, 
+        "hadoop-policy": {
+            "tag": "version1"
+        }, 
+        "hdfs-log4j": {
+            "tag": "version1"
+        }, 
+        "mapred-env": {
+            "tag": "version1"
+        }, 
+        "krb5-conf": {
+            "tag": "version1425149782373"
+        }, 
+        "pig-properties": {
+            "tag": "version1425325831978"
+        }, 
+        "core-site": {
+            "tag": "version1425150589818"
+        }, 
+        "yarn-env": {
+            "tag": "version1"
+        }, 
+        "hadoop-env": {
+            "tag": "version1425150589788"
+        }, 
+        "zookeeper-log4j": {
+            "tag": "version1"
+        }, 
+        "yarn-site": {
+            "tag": "version1425150589763"
+        }, 
+        "capacity-scheduler": {
+            "tag": "version1"
+        }, 
+        "zoo.cfg": {
+            "tag": "version1"
+        }, 
+        "zookeeper-env": {
+            "tag": "version1425150589681"
+        }, 
+        "pig-log4j": {
+            "tag": "version1425325831978"
+        }, 
+        "cluster-env": {
+            "tag": "version1425150589709"
+        }
+    }, 
+    "commandId": "15-0", 
+    "clusterHostInfo": {
+        "snamenode_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "nm_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "app_timeline_server_hosts": [
+            "c6402.ambari.apache.org"
+        ], 
+        "all_ping_ports": [
+            "8670", 
+            "8670", 
+            "8670"
+        ], 
+        "rm_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "all_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "slave_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "namenode_host": [
+            "c6401.ambari.apache.org"
+        ], 
+        "ambari_server_host": [
+            "c6401.ambari.apache.org"
+        ], 
+        "zookeeper_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "hs_host": [
+            "c6402.ambari.apache.org"
+        ]
+    }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/c689096d/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json b/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json
index bcc5359..3d0dc28 100644
--- a/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json
+++ b/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_2_1_3.json
@@ -796,7 +796,7 @@
     }, {
       "yarn-site": {
         "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
-        "yarn.resourcemanager.proxyusers.*.users": "",
+        "yarn.resourcemanager.proxyuser.*.users": "",
         "yarn.timeline-service.http-authentication.token.validity": "",
         "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
         "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
@@ -805,14 +805,14 @@
         "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
         "yarn.acl.enable": "true",
         "yarn.timeline-service.http-authentication.signer.secret.provider": "",
-        "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
-        "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+        "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
+        "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
         "yarn.timeline-service.http-authentication.signature.secret": "",
         "yarn.timeline-service.http-authentication.signature.secret.file": "",
-        "yarn.resourcemanager.proxyusers.*.hosts": "",
-        "yarn.resourcemanager.proxyusers.*.groups": "",
+        "yarn.resourcemanager.proxyuser.*.hosts": "",
+        "yarn.resourcemanager.proxyuser.*.groups": "",
         "yarn.timeline-service.enabled": "true",
-        "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+        "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
         "yarn.timeline-service.http-authentication.cookie.domain": ""
       }
     }, {
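
The hunks above rename the wildcard proxy-user properties in these yarn-site test descriptors from "proxyusers.*" to "proxyuser.*". As a minimal illustration of that key rewrite (a hypothetical Python helper for this note, not code from the commit):

# Hypothetical helper mirroring the manual edits in test_kerberos_descriptor_2_1_3.json above.
def rename_proxyusers_keys(yarn_site):
    """Return a copy of a yarn-site dict with '.proxyusers.' segments renamed to '.proxyuser.'."""
    return {key.replace(".proxyusers.", ".proxyuser."): value
            for key, value in yarn_site.items()}

yarn_site = {
    "yarn.resourcemanager.proxyusers.*.users": "",
    "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
    "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",  # untouched by the rename
}
renamed = rename_proxyusers_keys(yarn_site)
assert "yarn.resourcemanager.proxyuser.*.users" in renamed
assert "yarn.admin.acl" in renamed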


[15/50] [abbrv] ambari git commit: AMBARI-19561. After an Ambari-only upgrade, the property "yarn.nodemanager.linux-container-executor.cgroups.mount-path" becomes required. Fix UT (dgrinenko via dlysnichenko)

Posted by nc...@apache.org.
AMBARI-19561. After an Ambari-only upgrade, the property "yarn.nodemanager.linux-container-executor.cgroups.mount-path" becomes required. Fix UT (dgrinenko via dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e3e9f70d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e3e9f70d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e3e9f70d

Branch: refs/heads/branch-dev-patch-upgrade
Commit: e3e9f70de671b0700153af8b0af0acd5bc5ae89e
Parents: 1524fd7
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Tue Jan 17 15:47:50 2017 +0200
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Tue Jan 17 15:47:50 2017 +0200

----------------------------------------------------------------------
 .../org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java | 5 +++++
 1 file changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e3e9f70d/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
index 484a09a..f531433 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
@@ -285,6 +285,7 @@ public class UpgradeCatalog250Test {
     Method updateLogSearchConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateLogSearchConfigs");
     Method updateAmbariInfraConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateAmbariInfraConfigs");
     Method updateRangerUrlConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateRangerUrlConfigs");
+    Method updateYarnSite = UpgradeCatalog250.class.getDeclaredMethod("updateYarnSite");
 
     UpgradeCatalog250 upgradeCatalog250 = createMockBuilder(UpgradeCatalog250.class)
         .addMockedMethod(updateAmsConfigs)
@@ -300,6 +301,7 @@ public class UpgradeCatalog250Test {
         .addMockedMethod(updateLogSearchConfigs)
         .addMockedMethod(updateAmbariInfraConfigs)
         .addMockedMethod(updateRangerUrlConfigs)
+        .addMockedMethod(updateYarnSite)
         .createMock();
 
     upgradeCatalog250.updateAMSConfigs();
@@ -341,6 +343,9 @@ public class UpgradeCatalog250Test {
     upgradeCatalog250.addManageServiceAutoStartPermissions();
     expectLastCall().once();
 
+    upgradeCatalog250.updateYarnSite();
+    expectLastCall().once();
+
     replay(upgradeCatalog250);
 
     upgradeCatalog250.executeDMLUpdates();


[40/50] [abbrv] ambari git commit: AMBARI-19578. Fix issues around modifying yarn min container size (smohanty)

Posted by nc...@apache.org.
AMBARI-19578. Fix issues around modifying yarn min container size (smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e0d78edb
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e0d78edb
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e0d78edb

Branch: refs/heads/branch-dev-patch-upgrade
Commit: e0d78edb94cbbfe56b075487d241143f337a0a87
Parents: 6ccff93
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Tue Jan 17 20:59:33 2017 -0800
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Tue Jan 17 21:01:21 2017 -0800

----------------------------------------------------------------------
 .../stacks/HDP/2.0.6/services/stack_advisor.py  | 46 ++++++++++++++------
 .../stacks/2.0.6/common/test_stack_advisor.py   | 21 +++++----
 .../stacks/2.2/common/test_stack_advisor.py     | 20 ++++-----
 .../stacks/2.5/common/test_stack_advisor.py     | 44 +++++++++----------
 4 files changed, 74 insertions(+), 57 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e0d78edb/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index 3596798..fc989fe 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -196,8 +196,26 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     nodemanagerMinRam = 1048576 # 1TB in mb
     if "referenceNodeManagerHost" in clusterData:
       nodemanagerMinRam = min(clusterData["referenceNodeManagerHost"]["total_mem"]/1024, nodemanagerMinRam)
+
+    callContext = getCallContext(services)
     putYarnProperty('yarn.nodemanager.resource.memory-mb', int(round(min(clusterData['containers'] * clusterData['ramPerContainer'], nodemanagerMinRam))))
-    putYarnProperty('yarn.scheduler.minimum-allocation-mb', int(clusterData['minContainerRam']))
+    # read from the supplied config
+    #if 'recommendConfigurations' != callContext and \
+    #        "yarn-site" in services["configurations"] and \
+    #        "yarn.nodemanager.resource.memory-mb" in services["configurations"]["yarn-site"]["properties"]:
+    #    putYarnProperty('yarn.nodemanager.resource.memory-mb', int(services["configurations"]["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"]))
+    if 'recommendConfigurations' == callContext:
+      putYarnProperty('yarn.nodemanager.resource.memory-mb', int(round(min(clusterData['containers'] * clusterData['ramPerContainer'], nodemanagerMinRam))))
+    else:
+      # read from the supplied config
+      if "yarn-site" in services["configurations"] and "yarn.nodemanager.resource.memory-mb" in services["configurations"]["yarn-site"]["properties"]:
+        putYarnProperty('yarn.nodemanager.resource.memory-mb', int(services["configurations"]["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"]))
+      else:
+        putYarnProperty('yarn.nodemanager.resource.memory-mb', int(round(min(clusterData['containers'] * clusterData['ramPerContainer'], nodemanagerMinRam))))
+      pass
+    pass
+
+    putYarnProperty('yarn.scheduler.minimum-allocation-mb', int(clusterData['yarnMinContainerSize']))
     putYarnProperty('yarn.scheduler.maximum-allocation-mb', int(configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"]))
     putYarnEnvProperty('min_user_id', self.get_system_min_uid())
 
@@ -887,7 +905,7 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     cluster["totalAvailableRam"] = max(512, totalAvailableRam * 1024)
     Logger.info("Memory for YARN apps - cluster[totalAvailableRam]: " + str(cluster["totalAvailableRam"]))
 
-    suggestedMinContainerRam = 1024
+    suggestedMinContainerRam = 1024   # new smaller value for YARN min container
     callContext = getCallContext(services)
 
     if services:  # its never None but some unit tests pass it as None
@@ -899,14 +917,14 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
                 str(services["configurations"]["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]).isdigit():
           Logger.info("Using user provided yarn.scheduler.minimum-allocation-mb = " +
                       str(services["configurations"]["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]))
-          cluster["minContainerRam"] = int(services["configurations"]["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"])
-          Logger.info("Minimum ram per container due to user input - cluster[minContainerRam]: " + str(cluster["minContainerRam"]))
-          if cluster["minContainerRam"] > cluster["totalAvailableRam"]:
-            cluster["minContainerRam"] = cluster["totalAvailableRam"]
-            Logger.info("Minimum ram per container after checking against limit - cluster[minContainerRam]: " + str(cluster["minContainerRam"]))
+          cluster["yarnMinContainerSize"] = int(services["configurations"]["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"])
+          Logger.info("Minimum ram per container due to user input - cluster[yarnMinContainerSize]: " + str(cluster["yarnMinContainerSize"]))
+          if cluster["yarnMinContainerSize"] > cluster["totalAvailableRam"]:
+            cluster["yarnMinContainerSize"] = cluster["totalAvailableRam"]
+            Logger.info("Minimum ram per container after checking against limit - cluster[yarnMinContainerSize]: " + str(cluster["yarnMinContainerSize"]))
             pass
-          cluster["minContainerSize"] = cluster["minContainerRam"]
-          suggestedMinContainerRam = cluster["minContainerRam"]
+          cluster["minContainerSize"] = cluster["yarnMinContainerSize"]    # set to what user has suggested as YARN min container size
+          suggestedMinContainerRam = cluster["yarnMinContainerSize"]
           pass
         pass
       pass
@@ -925,19 +943,19 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       pass
 
     cluster["ramPerContainer"] = int(abs(cluster["totalAvailableRam"] / cluster["containers"]))
-    cluster["minContainerRam"] = min(suggestedMinContainerRam, cluster["ramPerContainer"])
+    cluster["yarnMinContainerSize"] = min(suggestedMinContainerRam, cluster["ramPerContainer"])
     Logger.info("Ram per containers before normalization - cluster[ramPerContainer]: " + str(cluster["ramPerContainer"]))
 
-    '''If greater than cluster["minContainerRam"], value will be in multiples of cluster["minContainerRam"]'''
-    if cluster["ramPerContainer"] > cluster["minContainerRam"]:
-      cluster["ramPerContainer"] = int(cluster["ramPerContainer"] / cluster["minContainerRam"]) * cluster["minContainerRam"]
+    '''If greater than cluster["yarnMinContainerSize"], value will be in multiples of cluster["yarnMinContainerSize"]'''
+    if cluster["ramPerContainer"] > cluster["yarnMinContainerSize"]:
+      cluster["ramPerContainer"] = int(cluster["ramPerContainer"] / cluster["yarnMinContainerSize"]) * cluster["yarnMinContainerSize"]
 
 
     cluster["mapMemory"] = int(cluster["ramPerContainer"])
     cluster["reduceMemory"] = cluster["ramPerContainer"]
     cluster["amMemory"] = max(cluster["mapMemory"], cluster["reduceMemory"])
 
-    Logger.info("Min container size - cluster[minContainerRam]: " + str(cluster["minContainerRam"]))
+    Logger.info("Min container size - cluster[yarnMinContainerSize]: " + str(cluster["yarnMinContainerSize"]))
     Logger.info("Available memory for map - cluster[mapMemory]: " + str(cluster["mapMemory"]))
     Logger.info("Available memory for reduce - cluster[reduceMemory]: " + str(cluster["reduceMemory"]))
     Logger.info("Available memory for am - cluster[amMemory]: " + str(cluster["amMemory"]))

http://git-wip-us.apache.org/repos/asf/ambari/blob/e0d78edb/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
index a486fb3..2a97b75 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
@@ -258,7 +258,6 @@ class TestHDP206StackAdvisor(TestCase):
     result = self.stackAdvisor.validateConfigurations(services, hosts)
 
     expectedItems = [
-      {"message": "Value is less than the recommended default of 510", "level": "WARN"},
       {'message': 'Value should be set for yarn.nodemanager.linux-container-executor.group', 'level': 'ERROR'},
       {"message": "Value should be integer", "level": "ERROR"},
       {"message": "Value should be set", "level": "ERROR"}
@@ -472,7 +471,7 @@ class TestHDP206StackAdvisor(TestCase):
       "reservedRam": 4,
       "hbaseRam": 4,
       "minContainerSize": 1024,
-      "minContainerRam" : 1024,
+      "yarnMinContainerSize" : 1024,
       "totalAvailableRam": 15360,
       "containers": 4,
       "ramPerContainer": 3072,
@@ -584,7 +583,7 @@ class TestHDP206StackAdvisor(TestCase):
       "reservedRam": 4,
       "hbaseRam": 4,
       "minContainerSize": 2048,
-      "minContainerRam" : 2048,
+      "yarnMinContainerSize" : 2048,
       "totalAvailableRam": 15360,
       "containers": 4,
       "ramPerContainer": 2048,
@@ -649,7 +648,7 @@ class TestHDP206StackAdvisor(TestCase):
       "reservedRam": 4,
       "hbaseRam": 4,
       "minContainerSize": 2048,
-      "minContainerRam" : 2048,
+      "yarnMinContainerSize" : 2048,
       "totalAvailableRam": 15360,
       "containers": 4,
       "ramPerContainer": 2048,
@@ -714,7 +713,7 @@ class TestHDP206StackAdvisor(TestCase):
       "reservedRam": 4,
       "hbaseRam": 4,
       "minContainerSize": 4096,
-      "minContainerRam" : 4096,
+      "yarnMinContainerSize" : 4096,
       "totalAvailableRam": 15360,
       "containers": 3,
       "ramPerContainer": 4096,
@@ -761,7 +760,7 @@ class TestHDP206StackAdvisor(TestCase):
       "reservedRam": 2,
       "hbaseRam": 1,
       "minContainerSize": 512,
-      "minContainerRam" : 512,
+      "yarnMinContainerSize" : 512,
       "totalAvailableRam": 3072,
       "containers": 6,
       "ramPerContainer": 512,
@@ -837,7 +836,7 @@ class TestHDP206StackAdvisor(TestCase):
     expected["mapMemory"] = 128
     expected["minContainerSize"] = 128
     expected["reduceMemory"] = 128
-    expected["minContainerRam"] = 128
+    expected["yarnMinContainerSize"] = 128
     expected["ram"] = 0
     expected["ramPerContainer"] = 128
     expected["reservedRam"] = 1
@@ -881,7 +880,7 @@ class TestHDP206StackAdvisor(TestCase):
       "mapMemory": 3072,
       "reduceMemory": 3072,
       "amMemory": 3072,
-      "minContainerRam": 1024,
+      "yarnMinContainerSize": 1024,
       "referenceHost": hosts["items"][0]["Hosts"]
     }
 
@@ -938,7 +937,7 @@ class TestHDP206StackAdvisor(TestCase):
     clusterData = {
       "containers" : 5,
       "ramPerContainer": 256,
-      "minContainerRam": 256
+      "yarnMinContainerSize": 256
     }
     expected = {
       "yarn-env": {
@@ -966,7 +965,7 @@ class TestHDP206StackAdvisor(TestCase):
       "mapMemory": 567,
       "reduceMemory": 345.6666666666666,
       "amMemory": 123.54,
-      "minContainerRam": 123.54
+      "yarnMinContainerSize": 123.54
     }
     expected = {
       "mapred-site": {
@@ -1009,7 +1008,7 @@ class TestHDP206StackAdvisor(TestCase):
       "mapMemory": 170,
       "reduceMemory": 170,
       "amMemory": 170,
-      "minContainerRam" : 170
+      "yarnMinContainerSize" : 170
     }
 
     self.assertEquals(result, expected)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e0d78edb/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index 8a82c99..a26b661 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -90,7 +90,7 @@ class TestHDP22StackAdvisor(TestCase):
       "reduceMemory": 2056,
       "containers": 3,
       "ramPerContainer": 256,
-      "minContainerRam": 256
+      "yarnMinContainerSize": 256
     }
     expected = {
       "tez-site": {
@@ -184,7 +184,7 @@ class TestHDP22StackAdvisor(TestCase):
       "amMemory": 3100,
       "reduceMemory": 2056,
       "containers": 3,
-      "minContainerRam": 256,
+      "yarnMinContainerSize": 256,
       "ramPerContainer": 256
     }
     expected = {
@@ -271,7 +271,7 @@ class TestHDP22StackAdvisor(TestCase):
       "reduceMemory": 760,
       "containers": 3,
       "ramPerContainer": 256,
-      "minContainerRam": 256
+      "yarnMinContainerSize": 256
     }
     expected = {
       "tez-site": {
@@ -888,7 +888,7 @@ class TestHDP22StackAdvisor(TestCase):
       "cpu": 4,
       "containers" : 5,
       "ramPerContainer": 256,
-      "minContainerRam": 256
+      "yarnMinContainerSize": 256
     }
     expected = {
       "yarn-env": {
@@ -928,7 +928,7 @@ class TestHDP22StackAdvisor(TestCase):
       "cpu": 4,
       "containers": 5,
       "ramPerContainer": 256,
-      "minContainerRam": 256
+      "yarnMinContainerSize": 256
     }
     expected = {
       "spark-defaults": {
@@ -961,7 +961,7 @@ class TestHDP22StackAdvisor(TestCase):
       "cpu": 4,
       "containers" : 5,
       "ramPerContainer": 256,
-      "minContainerRam": 256
+      "yarnMinContainerSize": 256
     }
     expected = {
       "yarn-env": {
@@ -1223,7 +1223,7 @@ class TestHDP22StackAdvisor(TestCase):
       "cpu": 4,
       "containers" : 5,
       "ramPerContainer": 256,
-      "minContainerRam": 256
+      "yarnMinContainerSize": 256
     }
 
     services = {
@@ -1271,7 +1271,7 @@ class TestHDP22StackAdvisor(TestCase):
       "reduceMemory": 2056,
       "containers": 3,
       "ramPerContainer": 256,
-      "minContainerRam": 256
+      "yarnMinContainerSize": 256
     }
 
     expected = {
@@ -1762,7 +1762,7 @@ class TestHDP22StackAdvisor(TestCase):
       "containers" : 7,
       "ramPerContainer": 256,
       "totalAvailableRam": 4096,
-      "minContainerRam": 256
+      "yarnMinContainerSize": 256
     }
     expected = {
       "cluster-env": {
@@ -2031,7 +2031,7 @@ class TestHDP22StackAdvisor(TestCase):
       "cpu": 4,
       "containers" : 5,
       "ramPerContainer": 256,
-      "minContainerRam": 256
+      "yarnMinContainerSize": 256
     }
     expected = {
       "yarn-env": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e0d78edb/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
index 790c6a7..ad962fd 100644
--- a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
@@ -375,7 +375,7 @@ class TestHDP25StackAdvisor(TestCase):
       "cpu": 4,
       "containers": 5,
       "ramPerContainer": 256,
-      "minContainerRam": 256
+      "yarnMinContainerSize": 256
     }
     expected = {
       "spark2-defaults": {
@@ -803,7 +803,7 @@ class TestHDP25StackAdvisor(TestCase):
       "referenceNodeManagerHost" : {
         "total_mem" : 10240 * 1024
       },
-      "minContainerRam": 512
+      "yarnMinContainerSize": 512
     }
 
 
@@ -972,7 +972,7 @@ class TestHDP25StackAdvisor(TestCase):
       "referenceNodeManagerHost" : {
         "total_mem" : 10240 * 1024
       },
-      "minContainerRam": 512
+      "yarnMinContainerSize": 512
     }
 
     configurations = {
@@ -1153,7 +1153,7 @@ class TestHDP25StackAdvisor(TestCase):
       "referenceNodeManagerHost" : {
         "total_mem" : 10240 * 1024
       },
-      "minContainerRam": 512
+      "yarnMinContainerSize": 512
     }
 
 
@@ -1353,7 +1353,7 @@ class TestHDP25StackAdvisor(TestCase):
       "referenceNodeManagerHost" : {
         "total_mem" : 10240 * 2048
       },
-      "minContainerRam": 512
+      "yarnMinContainerSize": 512
     }
 
 
@@ -1546,7 +1546,7 @@ class TestHDP25StackAdvisor(TestCase):
       "referenceNodeManagerHost" : {
         "total_mem" : 51200 * 1024
       },
-      "minContainerRam": 1024
+      "yarnMinContainerSize": 1024
     }
 
     configurations = {
@@ -1747,7 +1747,7 @@ class TestHDP25StackAdvisor(TestCase):
       "referenceNodeManagerHost" : {
         "total_mem" : 40960 * 1024
       },
-      "minContainerRam": 1024
+      "yarnMinContainerSize": 1024
     }
 
     configurations = {
@@ -1942,7 +1942,7 @@ class TestHDP25StackAdvisor(TestCase):
       "referenceNodeManagerHost" : {
         "total_mem" : 12288 * 1024
       },
-      "minContainerRam": 341
+      "yarnMinContainerSize": 341
     }
 
 
@@ -2140,7 +2140,7 @@ class TestHDP25StackAdvisor(TestCase):
       "referenceNodeManagerHost" : {
         "total_mem" : 204800 * 1024
       },
-      "minContainerRam": 1024
+      "yarnMinContainerSize": 1024
     }
 
     configurations = {
@@ -2341,7 +2341,7 @@ class TestHDP25StackAdvisor(TestCase):
       "referenceNodeManagerHost" : {
         "total_mem" : 40960 * 1024
       },
-      "minContainerRam": 1024
+      "yarnMinContainerSize": 1024
     }
 
 
@@ -2538,7 +2538,7 @@ class TestHDP25StackAdvisor(TestCase):
       "referenceNodeManagerHost" : {
         "total_mem" : 204800 * 1024
       },
-      "minContainerRam": 341
+      "yarnMinContainerSize": 341
     }
 
 
@@ -2734,7 +2734,7 @@ class TestHDP25StackAdvisor(TestCase):
       "referenceNodeManagerHost" : {
         "total_mem" : 204800 * 1024
       },
-      "minContainerRam": 1024
+      "yarnMinContainerSize": 1024
     }
 
     configurations = {
@@ -2925,7 +2925,7 @@ class TestHDP25StackAdvisor(TestCase):
       "referenceNodeManagerHost" : {
         "total_mem" : 328960 * 1024
       },
-      "minContainerRam": 1024
+      "yarnMinContainerSize": 1024
     }
 
 
@@ -3145,7 +3145,7 @@ class TestHDP25StackAdvisor(TestCase):
       "referenceNodeManagerHost" : {
         "total_mem" : 10240 * 1024
       },
-      "minContainerRam": 512
+      "yarnMinContainerSize": 512
     }
 
     configurations = {
@@ -3159,12 +3159,12 @@ class TestHDP25StackAdvisor(TestCase):
     self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.server2.tez.sessions.per.default.queue'], {'maximum': '1.0', 'minimum': '1'})
 
     self.assertTrue(configurations['hive-interactive-env']['properties']['num_llap_nodes'], 3)
-    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '9728')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '204288')
 
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '3')
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.threadpool.size'], '3')
 
-    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.memory.size'], '3584')
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.memory.size'], '198144')
     self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.enabled'], 'true')
 
     self.assertEqual(configurations['hive-interactive-env']['properties']['llap_heap_size'], '4915')
@@ -3370,7 +3370,7 @@ class TestHDP25StackAdvisor(TestCase):
       "referenceNodeManagerHost" : {
         "total_mem" : 10240 * 1024
       },
-      "minContainerRam": 512
+      "yarnMinContainerSize": 512
     }
 
     configurations = {
@@ -3563,7 +3563,7 @@ class TestHDP25StackAdvisor(TestCase):
       "referenceNodeManagerHost" : {
         "total_mem" : 328960 * 1024
       },
-      "minContainerRam": 1024
+      "yarnMinContainerSize": 1024
     }
 
     configurations = {
@@ -3746,7 +3746,7 @@ class TestHDP25StackAdvisor(TestCase):
       "referenceNodeManagerHost" : {
         "total_mem" : 328960 * 1024
       },
-      "minContainerRam": 1024
+      "yarnMinContainerSize": 1024
     }
 
     configurations = {
@@ -3877,7 +3877,7 @@ class TestHDP25StackAdvisor(TestCase):
       "referenceNodeManagerHost" : {
         "total_mem" : 10240 * 1024
       },
-      "minContainerRam": 512
+      "yarnMinContainerSize": 512
     }
 
     configurations = {
@@ -3923,7 +3923,7 @@ class TestHDP25StackAdvisor(TestCase):
       "reduceMemory": 2056,
       "containers": 3,
       "ramPerContainer": 256,
-      "minContainerRam": 256
+      "yarnMinContainerSize": 256
     }
     expected = {
       'application-properties': {
@@ -4659,7 +4659,7 @@ class TestHDP25StackAdvisor(TestCase):
       "referenceNodeManagerHost" : {
         "total_mem" : 328960 * 1024
       },
-      "minContainerRam": 256
+      "yarnMinContainerSize": 256
     }
     hosts = {
       "items" : [


[20/50] [abbrv] ambari git commit: Revert "AMBARI-19545: Ambari-agent - In HIVE and OOZIE stack scripts, copy JCEKS file to desired location"

Posted by nc...@apache.org.
Revert "AMBARI-19545: Ambari-agent - In HIVE and OOZIE stack scripts, copy JCEKS file to desired location"

This reverts commit ded8ee71c1c50ac76bc70ad8df8c39c7654d3fe9.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6fa54aee
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6fa54aee
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6fa54aee

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 6fa54aeeb368c8f50803b615cc14b9bf94964205
Parents: fecf197
Author: Nahappan Somasundaram <ns...@hortonworks.com>
Authored: Tue Jan 17 11:00:54 2017 -0800
Committer: Nahappan Somasundaram <ns...@hortonworks.com>
Committed: Tue Jan 17 11:01:37 2017 -0800

----------------------------------------------------------------------
 .../HIVE/0.12.0.2.0/package/scripts/hive.py     | 31 +----------------
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    | 36 ++------------------
 2 files changed, 3 insertions(+), 64 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6fa54aee/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
index 51e3b9f..f825982 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
@@ -20,7 +20,6 @@ limitations under the License.
 
 import os
 import glob
-
 from urlparse import urlparse
 
 from resource_management.libraries.script.script import Script
@@ -47,33 +46,6 @@ from ambari_commons.constants import SERVICE
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons import OSConst
 
-# The property name used by the hadoop credential provider
-HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME = 'hadoop.security.credential.provider.path'
-
-# Move JCEKS provider to service specific location and update the ACL
-def update_credential_provider_path(config_type, dest_provider_path):
-  import params
-
-  # Get the path to the provider <config_type>.jceks
-  if HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME in params.config['configurations'][config_type]:
-    provider_paths = params.config['configurations'][config_type][HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME].split(',')
-    for path_index in range(len(provider_paths)):
-      provider_path = provider_paths[path_index]
-      if config_type == os.path.splitext(os.path.basename(provider_path))[0]:
-        src_provider_path = provider_path[len('jceks://file'):]
-        File(dest_provider_path,
-          owner = params.hive_user,
-          group = params.user_group,
-          mode = 0640,
-          content = StaticFile(src_provider_path)
-        )
-        provider_paths[path_index] = 'jceks://file{0}'.format(dest_provider_path)
-        # make a copy of the config dictionary since it is read-only
-        config = params.config['configurations'][config_type].copy()
-        config[HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME] = ','.join(provider_paths)
-        return config
-    return params.config['configurations'][config_type]
-
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
 def hive(name=None):
   import params
@@ -92,10 +64,9 @@ def hive(name=None):
   for conf_dir in params.hive_conf_dirs_list:
     fill_conf_dir(conf_dir)
 
-  hive_site_config = update_credential_provider_path('hive-site', os.path.join(params.hive_conf_dir, 'hive-site.jceks'))
   XmlConfig("hive-site.xml",
             conf_dir=params.hive_config_dir,
-            configurations=hive_site_config,
+            configurations=params.hive_site_config,
             configuration_attributes=params.config['configuration_attributes']['hive-site'],
             owner=params.hive_user,
             group=params.user_group,

http://git-wip-us.apache.org/repos/asf/ambari/blob/6fa54aee/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
index 3cdafe9..4a472ff 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
@@ -27,7 +27,6 @@ from resource_management.core.resources.system import Directory, Execute, File
 from resource_management.core.source import DownloadSource
 from resource_management.core.source import InlineTemplate
 from resource_management.core.source import Template
-from resource_management.core.source import StaticFile
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import StackFeature
@@ -51,33 +50,6 @@ from ambari_commons.inet_utils import download_file
 
 from resource_management.core import Logger
 
-# The property name used by the hadoop credential provider
-HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME = 'hadoop.security.credential.provider.path'
-
-# Move JCEKS provider to service specific location and update the ACL
-def update_credential_provider_path(config_type, dest_provider_path):
-  import params
-
-  # Get the path to the provider <config_type>.jceks
-  if HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME in params.config['configurations'][config_type]:
-    provider_paths = params.config['configurations'][config_type][HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME].split(',')
-    for path_index in range(len(provider_paths)):
-      provider_path = provider_paths[path_index]
-      if config_type == os.path.splitext(os.path.basename(provider_path))[0]:
-        src_provider_path = provider_path[len('jceks://file'):]
-        Logger.info('src_provider_path={0}, dest_provider_path{1}'.format(src_provider_path, dest_provider_path))
-        File(dest_provider_path,
-          owner = params.oozie_user,
-          group = params.user_group,
-          mode = 0640,
-          content = StaticFile(src_provider_path)
-        )
-        provider_paths[path_index] = 'jceks://file{0}'.format(dest_provider_path)
-        # make a copy of the config dictionary since it cannot be modified
-        config = params.config['configurations'][config_type].copy()
-        config[HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME] = ','.join(provider_paths)
-        return config
-    return params.config['configurations'][config_type]
 
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def oozie(is_server=False):
@@ -143,12 +115,9 @@ def oozie(is_server=False):
              owner = params.oozie_user,
              group = params.user_group
   )
-
-  oozie_site_config = update_credential_provider_path('oozie-site', os.path.join(params.conf_dir, 'oozie-site.jceks'))
-
   XmlConfig("oozie-site.xml",
     conf_dir = params.conf_dir,
-    configurations = oozie_site_config,
+    configurations = params.oozie_site,
     configuration_attributes=params.config['configuration_attributes']['oozie-site'],
     owner = params.oozie_user,
     group = params.user_group,
@@ -320,10 +289,9 @@ def oozie_server_specific():
         group = params.user_group
     )
     if 'hive-site' in params.config['configurations']:
-      hive_site_config = update_credential_provider_path('hive-site', os.path.join(params.hive_conf_dir, 'hive-site.jceks'))
       XmlConfig("hive-site.xml",
         conf_dir=params.hive_conf_dir,
-        configurations=hive_site_config,
+        configurations=params.config['configurations']['hive-site'],
         configuration_attributes=params.config['configuration_attributes']['hive-site'],
         owner=params.oozie_user,
         group=params.user_group,
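
Both this hunk and the hive.py hunk above delete an identical copy of update_credential_provider_path(); the site configs are now expected to arrive already rewritten via params (params.oozie_site, params.hive_site_config). For reference, a minimal standalone sketch of the property rewrite that helper performed -- plain dicts only, no File(...) copy of the .jceks keystore, and the paths shown are illustrative, not Ambari defaults:

    import os

    PROVIDER_PATH_PROPERTY = 'hadoop.security.credential.provider.path'

    def rewrite_provider_path(configurations, config_type, dest_provider_path):
        # Work on a copy: the Ambari config dictionary is read-only.
        config = dict(configurations[config_type])
        if PROVIDER_PATH_PROPERTY not in config:
            return config
        provider_paths = config[PROVIDER_PATH_PROPERTY].split(',')
        for i, provider_path in enumerate(provider_paths):
            # Match the provider whose basename is <config_type>.jceks.
            if config_type == os.path.splitext(os.path.basename(provider_path))[0]:
                # The removed helper also copied the keystore to dest_provider_path
                # with owner/group and mode 0640; this sketch only rewrites the URI.
                provider_paths[i] = 'jceks://file{0}'.format(dest_provider_path)
                config[PROVIDER_PATH_PROPERTY] = ','.join(provider_paths)
                break
        return config

    demo = {'oozie-site': {PROVIDER_PATH_PROPERTY:
            'jceks://file/var/lib/ambari-agent/cred/conf/oozie-site.jceks'}}
    print(rewrite_provider_path(demo, 'oozie-site', '/etc/oozie/conf/oozie-site.jceks'))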


[09/50] [abbrv] ambari git commit: AMBARI-19572 Move Master and HA wizards for all components should show config changes that will be done as part of the wizard. (ababiichuk)

Posted by nc...@apache.org.
AMBARI-19572 Move Master and HA wizards for all components should show config changes that will be done as part of the wizard. (ababiichuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/cba69d93
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/cba69d93
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/cba69d93

Branch: refs/heads/branch-dev-patch-upgrade
Commit: cba69d93c0e062b37d89b75db00a16e0fa4369e2
Parents: b7d8f5e
Author: ababiichuk <ab...@hortonworks.com>
Authored: Mon Jan 16 18:54:21 2017 +0200
Committer: ababiichuk <ab...@hortonworks.com>
Committed: Tue Jan 17 12:19:40 2017 +0200

----------------------------------------------------------------------
 .../main/service/reassign/step1_controller.js   |  78 ++-
 .../main/service/reassign/step3_controller.js   | 663 +++++++++++++++++++
 .../main/service/reassign/step4_controller.js   | 629 ++----------------
 .../main/service/reassign_controller.js         |  47 +-
 ambari-web/app/messages.js                      |  21 +-
 ambari-web/app/routes/reassign_master_routes.js |  24 +-
 ambari-web/app/styles/wizard.less               |   2 +-
 .../highAvailability/journalNode/step2.hbs      |  65 +-
 .../templates/main/service/reassign/step3.hbs   |  36 +-
 .../views/main/service/reassign/step3_view.js   |   4 +-
 .../views/main/service/reassign/step5_view.js   |   4 +-
 .../service/reassign/step1_controller_test.js   |  17 +-
 .../service/reassign/step3_controller_test.js   | 634 ++++++++++++++++++
 .../service/reassign/step4_controller_test.js   | 646 +-----------------
 14 files changed, 1582 insertions(+), 1288 deletions(-)
----------------------------------------------------------------------
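
The refactor below moves the reconfiguration preview into step 3: the step builds one (type=<site>&tag=<tag>) clause per config site it needs (see getConfigUrlParams in the step3 diff), loads those configs, and renders the properties that will change. A rough sketch of that URL-parameter construction, with plain Python structures standing in for the Ember controller state and the desired_configs payload:

    # Illustrative only: dicts stand in for the wizard's maps and for the
    # Clusters.desired_configs response; tags are made-up sample values.
    SERVICE_TO_CONFIG_SITE_MAP = {
        'RESOURCEMANAGER': ['yarn-site'],
        'NAMENODE': ['hdfs-site', 'core-site'],
    }

    def config_url_params(component_name, desired_configs, extra_sites=()):
        # extra_sites models componentSpecificTypesMap, e.g. hawq-site when
        # HAWQ is deployed alongside the component being moved.
        sites = SERVICE_TO_CONFIG_SITE_MAP[component_name] + list(extra_sites)
        return '|'.join('(type={0}&tag={1})'.format(site, desired_configs[site]['tag'])
                        for site in sites)

    desired = {'yarn-site': {'tag': 'version1484568962198'},
               'hawq-site': {'tag': 'version1'}}
    print(config_url_params('RESOURCEMANAGER', desired, extra_sites=['hawq-site']))
    # -> (type=yarn-site&tag=version1484568962198)|(type=hawq-site&tag=version1)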


http://git-wip-us.apache.org/repos/asf/ambari/blob/cba69d93/ambari-web/app/controllers/main/service/reassign/step1_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/reassign/step1_controller.js b/ambari-web/app/controllers/main/service/reassign/step1_controller.js
index 9f30bb7..c631e85 100644
--- a/ambari-web/app/controllers/main/service/reassign/step1_controller.js
+++ b/ambari-web/app/controllers/main/service/reassign/step1_controller.js
@@ -26,9 +26,18 @@ App.ReassignMasterWizardStep1Controller = Em.Controller.extend({
    * @type {object}
    */
   dbPropertyMap: {
-    'HIVE_SERVER': 'javax.jdo.option.ConnectionDriverName',
-    'HIVE_METASTORE': 'javax.jdo.option.ConnectionDriverName',
-    'OOZIE_SERVER': 'oozie.service.JPAService.jdbc.driver'
+    'HIVE_SERVER': {
+      type: 'hive-site',
+      name: 'javax.jdo.option.ConnectionDriverName'
+    },
+    'HIVE_METASTORE': {
+      type: 'hive-site',
+      name: 'javax.jdo.option.ConnectionDriverName'
+    },
+    'OOZIE_SERVER': {
+      type: 'oozie-site',
+      name: 'oozie.service.JPAService.jdbc.driver'
+    }
   },
 
   loadConfigsTags: function () {
@@ -79,55 +88,78 @@ App.ReassignMasterWizardStep1Controller = Em.Controller.extend({
   },
 
   onLoadConfigs: function (data) {
-    var databaseProperty = null,
-        databaseType = null,
-        properties = {},
-        isRemoteDB = null;
+    var databaseProperty,
+      databaseType = null,
+      databaseTypeMatch,
+      properties = {},
+      configs = {},
+      dbPropertyMapItem = Em.getWithDefault(this.get('dbPropertyMap'), this.get('content.reassign.component_name'), null),
+      serviceDbProp = this.get('content.reassign.service_id').toLowerCase() + '_database';
 
     data.items.forEach(function(item) {
-      $.extend(properties, item.properties);
+      configs[item.type] = item.properties;
     });
 
-    this.set('content.serviceProperties', properties);
+    this.get('content').setProperties({
+      serviceProperties: properties,
+      configs: configs
+    });
 
-    databaseProperty = properties[ Em.getWithDefault(this.get('dbPropertyMap'), this.get('content.reassign.component_name'), null) ];
-    databaseType = databaseProperty.match(/MySQL|PostgreS|Oracle|Derby|MSSQL|Anywhere/gi)[0];
+    if (dbPropertyMapItem) {
+      databaseProperty = Em.getWithDefault(configs, dbPropertyMapItem.type, {})[dbPropertyMapItem.name];
+      databaseTypeMatch = databaseProperty && databaseProperty.match(/MySQL|PostgreS|Oracle|Derby|MSSQL|Anywhere/gi);
+      if (databaseTypeMatch) {
+        databaseType = databaseTypeMatch[0];
+      }
+    }
     this.set('databaseType', databaseType);
 
     if (this.get('content.reassign.component_name') == 'OOZIE_SERVER' && databaseType !== 'derby') {
       App.router.reassignMasterController.set('content.hasManualSteps', false);
     }
 
-    var serviceDbProp = this.get('content.reassign.service_id').toLowerCase() + "_database";
-    properties['is_remote_db'] = /Existing/ig.test( properties[serviceDbProp] );
+    properties['is_remote_db'] = /Existing/ig.test(properties[serviceDbProp]);
 
     properties['database_hostname'] = this.getDatabaseHost();
 
     this.saveDatabaseType(databaseType);
     this.saveServiceProperties(properties);
+    this.saveConfigs(configs);
   },
 
   saveDatabaseType: function(type) {
-    if(type) {
+    if (type) {
       App.router.get(this.get('content.controllerName')).saveDatabaseType(type);
     }
   },
 
   saveServiceProperties: function(properties) {
-    if(properties) {
+    if (properties) {
       App.router.get(this.get('content.controllerName')).saveServiceProperties(properties);
     }
   },
 
+  saveConfigs: function(configs) {
+    if (configs) {
+      App.router.get(this.get('content.controllerName')).saveConfigs(configs);
+    }
+  },
+
   getDatabaseHost: function() {
-    var db_type = this.get('databaseType');
-    var connectionURLPRops = {
-      'HIVE': 'javax.jdo.option.ConnectionURL',
-      'OOZIE': 'oozie.service.JPAService.jdbc.url'
-    };
-
-    var service = this.get('content.reassign.service_id');
-    var connectionURL = this.get('content.serviceProperties')[connectionURLPRops[service]];
+    var db_type = this.get('databaseType'),
+      connectionURLProps = {
+        'HIVE': {
+          type: 'hive-site',
+          name: 'javax.jdo.option.ConnectionURL'
+        },
+        'OOZIE': {
+          type: 'oozie-site',
+          name: 'oozie.service.JPAService.jdbc.url'
+        }
+      },
+      service = this.get('content.reassign.service_id'),
+      connectionURLPropsItem = connectionURLProps[service],
+      connectionURL = Em.getWithDefault(this.get('content.configs'), connectionURLPropsItem.type, {})[connectionURLPropsItem.name];
 
     connectionURL = connectionURL.replace("jdbc:" + db_type + "://", "");
     connectionURL = connectionURL.replace("/hive?createDatabaseIfNotExist=true", "");
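
The dbPropertyMap entries now name both the config file and the property, so the database type is read from content.configs[type][name] rather than from one merged property bag, and a missing property no longer throws on .match(). A hedged sketch of the new lookup shape, with plain Python dicts in place of the Ember objects:

    # Illustrative only: mirrors the {type, name} map shape from the diff above.
    import re

    DB_PROPERTY_MAP = {
        'HIVE_SERVER':    {'type': 'hive-site',  'name': 'javax.jdo.option.ConnectionDriverName'},
        'HIVE_METASTORE': {'type': 'hive-site',  'name': 'javax.jdo.option.ConnectionDriverName'},
        'OOZIE_SERVER':   {'type': 'oozie-site', 'name': 'oozie.service.JPAService.jdbc.driver'},
    }

    def database_type(component_name, configs):
        item = DB_PROPERTY_MAP.get(component_name)
        if not item:
            return None  # component needs no DB lookup
        driver = configs.get(item['type'], {}).get(item['name'])
        match = driver and re.search(r'MySQL|PostgreS|Oracle|Derby|MSSQL|Anywhere',
                                     driver, re.I)
        return match.group(0) if match else None

    configs = {'hive-site': {'javax.jdo.option.ConnectionDriverName': 'com.mysql.jdbc.Driver'}}
    print(database_type('HIVE_SERVER', configs))  # -> mysql (match is case-insensitive)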

http://git-wip-us.apache.org/repos/asf/ambari/blob/cba69d93/ambari-web/app/controllers/main/service/reassign/step3_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/reassign/step3_controller.js b/ambari-web/app/controllers/main/service/reassign/step3_controller.js
index d717dea..f13cf0b 100644
--- a/ambari-web/app/controllers/main/service/reassign/step3_controller.js
+++ b/ambari-web/app/controllers/main/service/reassign/step3_controller.js
@@ -21,6 +21,669 @@ var App = require('app');
 App.ReassignMasterWizardStep3Controller = Em.Controller.extend({
   name: 'reassignMasterWizardStep3Controller',
 
+  componentSpecificTypesMap: {
+    'NAMENODE': [
+      {
+        serviceName: 'HBASE',
+        configTypes: ['hbase-site']
+      },
+      {
+        serviceName: 'ACCUMULO',
+        configTypes: ['accumulo-site']
+      },
+      {
+        serviceName: 'HAWQ',
+        configTypes: ['hawq-site', 'hdfs-client']
+      }
+    ],
+    'RESOURCEMANAGER': [
+      {
+        serviceName: 'HAWQ',
+        configTypes: ['hawq-site', 'yarn-client']
+      }
+    ]
+  },
+
+  /**
+   * additional configs with template values
+   * Part of value to substitute has following format: "<replace-value>"
+   */
+  additionalConfigsMap: [
+    {
+      componentName: 'RESOURCEMANAGER',
+      configs: {
+        'yarn-site': {
+          'yarn.resourcemanager.address': '<replace-value>:8050',
+          'yarn.resourcemanager.admin.address': '<replace-value>:8141',
+          'yarn.resourcemanager.resource-tracker.address': '<replace-value>:8025',
+          'yarn.resourcemanager.scheduler.address': '<replace-value>:8030',
+          'yarn.resourcemanager.webapp.address': '<replace-value>:8088',
+          'yarn.resourcemanager.hostname': '<replace-value>'
+        }
+      }
+    },
+    {
+      componentName: 'JOBTRACKER',
+      configs: {
+        'mapred-site': {
+          'mapred.job.tracker.http.address': '<replace-value>:50030',
+          'mapred.job.tracker': '<replace-value>:50300'
+        }
+      }
+    },
+    {
+      componentName: 'SECONDARY_NAMENODE',
+      configs: {
+        'hdfs-site': {
+          'dfs.secondary.http.address': '<replace-value>:50090'
+        }
+      },
+      configs_Hadoop2: {
+        'hdfs-site': {
+          'dfs.namenode.secondary.http-address': '<replace-value>:50090'
+        }
+      }
+    },
+    {
+      componentName: 'NAMENODE',
+      configs: {
+        'hdfs-site': {
+          'dfs.http.address': '<replace-value>:50070',
+          'dfs.https.address': '<replace-value>:50470'
+        },
+        'core-site': {
+          'fs.default.name': 'hdfs://<replace-value>:8020'
+        }
+      },
+      configs_Hadoop2: {
+        'hdfs-site': {
+          'dfs.namenode.rpc-address': '<replace-value>:8020',
+          'dfs.namenode.http-address': '<replace-value>:50070',
+          'dfs.namenode.https-address': '<replace-value>:50470'
+        },
+        'core-site': {
+          'fs.defaultFS': 'hdfs://<replace-value>:8020'
+        }
+      }
+    },
+    {
+      componentName: 'APP_TIMELINE_SERVER',
+      configs: {
+        'yarn-site': {
+          'yarn.timeline-service.webapp.address': '<replace-value>:8188',
+          'yarn.timeline-service.webapp.https.address': '<replace-value>:8190',
+          'yarn.timeline-service.address': '<replace-value>:10200'
+        }
+      }
+    },
+    {
+      componentName: 'OOZIE_SERVER',
+      configs: {
+        'oozie-site': {
+          'oozie.base.url': 'http://<replace-value>:11000/oozie'
+        },
+        'core-site': {
+          'hadoop.proxyuser.oozie.hosts': '<replace-value>'
+        }
+      }
+    },
+    {
+      componentName: 'HIVE_METASTORE',
+      configs: {
+        'hive-site': {}
+      }
+    },
+    {
+      componentName: 'MYSQL_SERVER',
+      configs: {
+        'hive-site': {
+          'javax.jdo.option.ConnectionURL': 'jdbc:mysql://<replace-value>/hive?createDatabaseIfNotExist=true'
+        }
+      }
+    },
+    {
+      componentName: 'HISTORYSERVER',
+      configs: {
+        'mapred-site': {
+          'mapreduce.jobhistory.webapp.address': '<replace-value>:19888',
+          'mapreduce.jobhistory.address': '<replace-value>:10020'
+        }
+      }
+    }
+  ],
+
+  secureConfigsMap: [
+    {
+      componentName: 'NAMENODE',
+      configs: [
+        {
+          site: 'hdfs-site',
+          keytab: 'dfs.namenode.keytab.file',
+          principal: 'dfs.namenode.kerberos.principal'
+        },
+        {
+          site: 'hdfs-site',
+          keytab: 'dfs.web.authentication.kerberos.keytab',
+          principal: 'dfs.web.authentication.kerberos.principal'
+        }
+      ]
+    },
+    {
+      componentName: 'SECONDARY_NAMENODE',
+      configs: [
+        {
+          site: 'hdfs-site',
+          keytab: 'dfs.secondary.namenode.keytab.file',
+          principal: 'dfs.secondary.namenode.kerberos.principal'
+        },
+        {
+          site: 'hdfs-site',
+          keytab: 'dfs.web.authentication.kerberos.keytab',
+          principal: 'dfs.web.authentication.kerberos.principal'
+        }
+      ]
+    },
+    {
+      componentName: 'RESOURCEMANAGER',
+      configs: [
+        {
+          site: 'yarn-site',
+          keytab: 'yarn.resourcemanager.keytab',
+          principal: 'yarn.resourcemanager.principal'
+        },
+        {
+          site: 'yarn-site',
+          keytab: 'yarn.resourcemanager.webapp.spnego-keytab-file',
+          principal: 'yarn.resourcemanager.webapp.spnego-principal'
+        }
+      ]
+    },
+    {
+      componentName: 'OOZIE_SERVER',
+      configs: [
+        {
+          site: 'oozie-site',
+          keytab: 'oozie.authentication.kerberos.keytab',
+          principal: 'oozie.authentication.kerberos.principal'
+        },
+        {
+          site: 'oozie-site',
+          keytab: 'oozie.service.HadoopAccessorService.keytab.file',
+          principal: 'oozie.service.HadoopAccessorService.kerberos.principal'
+        }
+      ]
+    },
+    {
+      componentName: 'WEBHCAT_SERVER',
+      configs: [
+        {
+          site: 'webhcat-site',
+          keytab: 'templeton.kerberos.keytab',
+          principal: 'templeton.kerberos.principal'
+        }
+      ]
+    },
+    {
+      componentName: 'HIVE_SERVER',
+      configs: [
+        {
+          site: 'hive-site',
+          keytab: 'hive.server2.authentication.kerberos.keytab',
+          principal: 'hive.server2.authentication.kerberos.principal'
+        },
+        {
+          site: 'hive-site',
+          keytab: 'hive.server2.authentication.spnego.keytab',
+          principal: 'hive.server2.authentication.spnego.principal'
+        }
+      ]
+    },
+    {
+      componentName: 'HIVE_METASTORE',
+      configs: [
+        {
+          site: 'hive-site',
+          keytab: 'hive.metastore.kerberos.keytab.file',
+          principal: 'hive.metastore.kerberos.principal'
+        }
+      ]
+    }
+
+  ],
+
+  isLoaded: false,
+
+  versionLoaded: true,
+
+  hideDependenciesInfoBar: true,
+
+  configs: null,
+
+  secureConfigs: [],
+
+  stepConfigs: [],
+
+  propertiesToChange: {},
+
+  isSubmitDisabled: Em.computed.and('wizardController.isComponentWithReconfiguration', '!isLoaded'),
+
+  loadStep: function () {
+    if (this.get('wizardController.isComponentWithReconfiguration')) {
+      this.set('isLoaded', false);
+      App.ajax.send({
+        name: 'config.tags',
+        sender: this,
+        success: 'onLoadConfigsTags'
+      });
+    }
+  },
+
+  clearStep: function () {
+    this.setProperties({
+      configs: null,
+      secureConfigs: [],
+      propertiesToChange: {}
+    });
+  },
+
+  onLoadConfigsTags: function (data) {
+    var urlParams = this.getConfigUrlParams(this.get('content.reassign.component_name'), data);
+
+    App.ajax.send({
+      name: 'reassign.load_configs',
+      sender: this,
+      data: {
+        urlParams: urlParams.join('|')
+      },
+      success: 'onLoadConfigs'
+    });
+  },
+
+  getConfigUrlParams: function (componentName, data) {
+    var urlParams = [];
+
+    this.get('wizardController.serviceToConfigSiteMap')[componentName].forEach(function(site){
+      urlParams.push('(type=' + site + '&tag=' + data.Clusters.desired_configs[site].tag + ')');
+    });
+
+    // specific cases for certain components
+    var specificTypes = this.get('componentSpecificTypesMap')[componentName];
+    if (specificTypes) {
+      var services = App.Service.find();
+      specificTypes.forEach(function (service) {
+        if (services.someProperty('serviceName', service.serviceName)) {
+          service.configTypes.forEach(function (site) {
+            urlParams.push('(type=' + site + '&tag=' + data.Clusters.desired_configs[site].tag + ')');
+          });
+        }
+      });
+    }
+
+    return urlParams;
+  },
+
+  renderServiceConfigs: function (configs) {
+    var self = this,
+      configCategories = [],
+      displayedConfigs = [],
+      serviceConfig = App.ServiceConfig.create({
+        serviceName: 'MISC',
+        configCategories: configCategories,
+        showConfig: true,
+        configs: displayedConfigs
+      });
+    App.get('router.mainController.isLoading').call(App.get('router.clusterController'), 'isConfigsPropertiesLoaded').done(function () {
+      Em.keys(self.get('propertiesToChange')).forEach(function (type) {
+        var service = App.config.get('serviceByConfigTypeMap')[type];
+        if (service) {
+          var serviceName = service.get('serviceName');
+          if (!configCategories.someProperty('name', serviceName)) {
+            configCategories.push(App.ServiceConfigCategory.create({
+              name: serviceName,
+              displayName: service.get('displayName')
+            }));
+          }
+          this.get('propertiesToChange')[type].forEach(function (property) {
+            var propertyName = property.name,
+              stackProperty = App.configsCollection.getConfigByName(propertyName, type) || {},
+              displayedProperty = App.ServiceConfigProperty.create({
+                name: propertyName,
+                displayName: propertyName,
+                fileName: type
+              }, stackProperty, {
+                value: configs[type][propertyName],
+                category: serviceName,
+                isEditable: Boolean(stackProperty.isEditable !== false && !property.isSecure)
+              });
+            displayedConfigs.push(displayedProperty);
+          });
+        }
+      }, self);
+      self.setProperties({
+        stepConfigs: [serviceConfig],
+        selectedService: serviceConfig,
+        isLoaded: true
+      });
+    });
+  },
+
+  onLoadConfigs: function (data) {
+    // Find hawq-site.xml location
+    var hawqSiteIndex = -1;
+    for(var i = 0; i < data.items.length; i++){
+      if(data.items[i].type == 'hawq-site'){
+        hawqSiteIndex = i;
+        break;
+      }
+    }
+
+    // if certain services are deployed, include related site files to additionalConfigsMap and relatedServicesMap.
+    if(hawqSiteIndex >= 0){ // if HAWQ is deployed
+      var hawqSiteProperties = {
+        'hawq_rm_yarn_address': '<replace-value>:8050',
+        'hawq_rm_yarn_scheduler_address': '<replace-value>:8030'
+      }
+
+      var rmComponent = this.get('additionalConfigsMap').findProperty('componentName', "RESOURCEMANAGER");
+      rmComponent.configs["hawq-site"] = hawqSiteProperties;
+
+      if(data.items[hawqSiteIndex].properties["hawq_global_rm_type"].toLowerCase() === "yarn"){
+        this.get('wizardController.relatedServicesMap')['RESOURCEMANAGER'].append('HAWQ');
+      }
+
+    }
+
+    var componentName = this.get('content.reassign.component_name');
+    var targetHostName = this.get('content.reassignHosts.target');
+    var configs = {};
+    var secureConfigs = [];
+
+    data.items.forEach(function (item) {
+      configs[item.type] = item.properties;
+    });
+
+    this.setAdditionalConfigs(configs, componentName, targetHostName);
+    this.setSecureConfigs(secureConfigs, configs, componentName);
+
+    this.set('secureConfigs', secureConfigs);
+
+    switch (componentName) {
+      case 'NAMENODE':
+        App.MoveNameNodeConfigInitializer.setup(this._getNnInitializerSettings(configs));
+        configs = this.setDynamicConfigs(configs, App.MoveNameNodeConfigInitializer);
+        App.MoveNameNodeConfigInitializer.cleanup();
+        break;
+      case 'RESOURCEMANAGER':
+        App.MoveRmConfigInitializer.setup(this._getRmInitializerSettings(configs));
+        var additionalDependencies = this._getRmAdditionalDependencies(configs);
+        configs = this.setDynamicConfigs(configs, App.MoveRmConfigInitializer, additionalDependencies);
+        App.MoveRmConfigInitializer.cleanup();
+        break;
+      case 'HIVE_METASTORE':
+        App.MoveHmConfigInitializer.setup(this._getHiveInitializerSettings(configs));
+        configs = this.setDynamicConfigs(configs, App.MoveHmConfigInitializer);
+        App.MoveHmConfigInitializer.cleanup();
+        break;
+      case 'HIVE_SERVER':
+        App.MoveHsConfigInitializer.setup(this._getHiveInitializerSettings(configs));
+        configs = this.setDynamicConfigs(configs, App.MoveHsConfigInitializer);
+        App.MoveHsConfigInitializer.cleanup();
+        break;
+      case 'WEBHCAT_SERVER':
+        App.MoveWsConfigInitializer.setup(this._getWsInitializerSettings(configs));
+        configs = this.setDynamicConfigs(configs, App.MoveWsConfigInitializer);
+        App.MoveWsConfigInitializer.cleanup();
+        break;
+      case 'OOZIE_SERVER':
+        App.MoveOSConfigInitializer.setup(this._getOsInitializerSettings(configs));
+        configs = this.setDynamicConfigs(configs, App.MoveOSConfigInitializer);
+        App.MoveOSConfigInitializer.cleanup();
+    }
+
+    this.renderServiceConfigs(configs);
+    this.set('configs', configs);
+  },
+
+  /**
+   * set additional configs
+   * configs_Hadoop2 - configs which belong to Hadoop 2 stack only
+   * @param configs
+   * @param componentName
+   * @param replaceValue
+   * @return {Boolean}
+   */
+  setAdditionalConfigs: function (configs, componentName, replaceValue) {
+    var component = this.get('additionalConfigsMap').findProperty('componentName', componentName);
+
+    if (Em.isNone(component)) return false;
+    var additionalConfigs = (component.configs_Hadoop2) ? component.configs_Hadoop2 : component.configs;
+
+    for (var site in additionalConfigs) {
+      if (additionalConfigs.hasOwnProperty(site)) {
+        for (var property in additionalConfigs[site]) {
+          if (additionalConfigs[site].hasOwnProperty(property)) {
+            if (App.get('isHaEnabled') && componentName === 'NAMENODE' && (property === 'fs.defaultFS' || property === 'dfs.namenode.rpc-address')) continue;
+
+            configs[site][property] = additionalConfigs[site][property].replace('<replace-value>', replaceValue);
+            if (!this.get('propertiesToChange').hasOwnProperty(site)) {
+              this.get('propertiesToChange')[site] = [];
+            }
+            this.get('propertiesToChange')[site].push({
+              name: property
+            });
+          }
+        }
+      }
+    }
+    return true;
+  },
+
+  /**
+   * set secure configs for component
+   * @param secureConfigs
+   * @param configs
+   * @param componentName
+   * @return {Boolean}
+   */
+  setSecureConfigs: function (secureConfigs, configs, componentName) {
+    var securityEnabled = App.get('isKerberosEnabled');
+    var component = this.get('secureConfigsMap').findProperty('componentName', componentName);
+    if (Em.isNone(component) || !securityEnabled) return false;
+
+    component.configs.forEach(function (config) {
+      secureConfigs.push({
+        keytab: configs[config.site][config.keytab],
+        principal: configs[config.site][config.principal]
+      });
+      if (!this.get('propertiesToChange').hasOwnProperty(config.site)) {
+        this.get('propertiesToChange')[config.site] = [];
+      }
+      this.get('propertiesToChange')[config.site].push(
+        {
+          name: config.keytab,
+          isSecure: true
+        },
+        {
+          name: config.principal,
+          isSecure: true
+        }
+      );
+    }, this);
+    return true;
+  },
+
+  /**
+   * Get additional dependencies-data for App.MoveNameNodeConfigInitializer
+   *
+   * @param {object} configs
+   * @returns {object}
+   * @private
+   * @method _getNnInitializerSettings
+   */
+  _getNnInitializerSettings: function (configs) {
+    var ret = {};
+    if (App.get('isHaEnabled')) {
+      ret.namespaceId = configs['hdfs-site']['dfs.nameservices'];
+      ret.suffix = (configs['hdfs-site']['dfs.namenode.http-address.' + ret.namespaceId + '.nn1'].indexOf(this.get('content.reassignHosts.source')) != -1) ? 'nn1' : 'nn2';
+    }
+    return ret;
+  },
+
+  /**
+   * Settings used to the App.MoveRmConfigInitializer setup
+   *
+   * @param {object} configs
+   * @returns {{suffix: string}}
+   * @private
+   * @method _getRmInitializerSettings
+   */
+  _getRmInitializerSettings: function (configs) {
+    return {
+      suffix: configs['yarn-site']['yarn.resourcemanager.hostname.rm1'] === this.get('content.reassignHosts.source') ? 'rm1': 'rm2'
+    };
+  },
+
+  /**
+   * Get additional dependencies-data for App.MoveRmConfigInitializer
+   *
+   * @param {object} configs
+   * @returns {object}
+   * @private
+   * @method _getRmAdditionalDependencies
+   */
+  _getRmAdditionalDependencies: function (configs) {
+    var ret = {};
+    var rm1 = configs['yarn-site']['yarn.resourcemanager.hostname.rm1'];
+    if (rm1) {
+      ret.rm1 = rm1;
+    }
+    var rm2 = configs['yarn-site']['yarn.resourcemanager.hostname.rm2'];
+    if (rm2) {
+      ret.rm2 = rm2;
+    }
+    return ret;
+  },
+
+  /**
+   * Settings used to the App.MoveHsConfigInitializer and App.MoveHmConfigInitializer setup
+   *
+   * @param {object} configs
+   * @returns {{hiveUser: string}}
+   * @private
+   * @method _getHiveInitializerSettings
+   */
+  _getHiveInitializerSettings: function (configs) {
+    return {
+      hiveUser: configs['hive-env']['hive_user']
+    };
+  },
+
+  /**
+   * Settings used to the App.MoveWsConfigInitializer setup
+   *
+   * @param {object} configs
+   * @returns {{webhcatUser: string}}
+   * @private
+   * @method _getWsInitializerSettings
+   */
+  _getWsInitializerSettings: function (configs) {
+    return {
+      webhcatUser: configs['hive-env']['webhcat_user']
+    };
+  },
+
+  /**
+   * Settings used to the App.MoveOSConfigInitializer setup
+   *
+   * @param {object} configs
+   * @returns {object}
+   * @private
+   * @method _getOsInitializerSettings
+   */
+  _getOsInitializerSettings: function (configs) {
+    var ret = {};
+    var cfg = configs['oozie-env']['oozie_user'];
+    if (cfg) {
+      ret.oozieUser = cfg;
+    }
+    return ret;
+  },
+
+  /**
+   * Set config values according to the new cluster topology
+   *
+   * @param {object} configs
+   * @param {MoveComponentConfigInitializerClass} initializer
+   * @param {object} [additionalDependencies={}]
+   * @returns {object}
+   * @method setDynamicConfigs
+   */
+  setDynamicConfigs: function (configs, initializer, additionalDependencies) {
+    additionalDependencies = additionalDependencies || {};
+    var topologyDB = this._prepareTopologyDB(),
+      dependencies = this._prepareDependencies(additionalDependencies),
+      initializerObjects = initializer.get('initializers'),
+      uniqueInitializerObjects = initializer.get('uniqueInitializers');
+    Em.keys(configs).forEach(function (site) {
+      Em.keys(configs[site]).forEach(function (config) {
+        // temporary object for initializer
+        var cfg = {
+          name: config,
+          filename: site,
+          value: configs[site][config]
+        };
+        configs[site][config] = initializer.initialValue(cfg, topologyDB, dependencies).value;
+        if (initializerObjects[config] || uniqueInitializerObjects[config]) {
+          if (!this.get('propertiesToChange').hasOwnProperty(site)) {
+            this.get('propertiesToChange')[site] = [];
+          }
+          this.get('propertiesToChange')[site].push({
+            name: config
+          });
+        }
+      }, this);
+    }, this);
+    return configs;
+  },
+
+  /**
+   *
+   * @returns {extendedTopologyLocalDB}
+   * @private
+   * @method _prepareTopologyDB
+   */
+  _prepareTopologyDB: function () {
+    var ret = this.get('content').getProperties(['masterComponentHosts', 'slaveComponentHosts', 'hosts']);
+    ret.installedServices = App.Service.find().mapProperty('serviceName');
+    return ret;
+  },
+
+  /**
+   * Create dependencies for Config Initializers
+   *
+   * @param {object} additionalDependencies  some additional information that should be added
+   * @returns {reassignComponentDependencies}
+   * @private
+   * @method _prepareDependencies
+   */
+  _prepareDependencies: function (additionalDependencies) {
+    additionalDependencies = additionalDependencies || {};
+    var ret = {};
+    ret.sourceHostName = this.get('content.reassignHosts.source');
+    ret.targetHostName = this.get('content.reassignHosts.target');
+    return Em.merge(ret, additionalDependencies);
+  },
+
+  updateServiceConfigs: function () {
+    var configs = this.get('configs');
+    if (configs) {
+      this.get('selectedService.configs').forEach(function (property) {
+        var type = App.config.getConfigTagFromFileName(property.fileName);
+        configs[type][property.name] = property.value;
+      }, this);
+    }
+  },
+
   submit: function() {
     App.get('router.mainAdminKerberosController').getKDCSessionState(function() {
       App.router.send("next");
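
additionalConfigsMap holds per-site templates whose values embed the literal placeholder "<replace-value>"; setAdditionalConfigs substitutes the target host into each one and records every touched property in propertiesToChange so the step can display it. A simplified Python sketch of that substitution, ignoring the Hadoop-2 and HA special cases (hostnames are illustrative):

    def apply_additional_configs(configs, template, target_host, properties_to_change):
        # Substitute the target host into each templated value and track what changed.
        for site, props in template.items():
            for name, tmpl in props.items():
                configs.setdefault(site, {})[name] = tmpl.replace('<replace-value>', target_host)
                properties_to_change.setdefault(site, []).append({'name': name})

    configs = {'yarn-site': {}}
    changed = {}
    rm_template = {'yarn-site': {'yarn.resourcemanager.hostname': '<replace-value>',
                                 'yarn.resourcemanager.webapp.address': '<replace-value>:8088'}}
    apply_additional_configs(configs, rm_template, 'c6402.ambari.apache.org', changed)
    print(configs['yarn-site']['yarn.resourcemanager.webapp.address'])
    # -> c6402.ambari.apache.org:8088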

http://git-wip-us.apache.org/repos/asf/ambari/blob/cba69d93/ambari-web/app/controllers/main/service/reassign/step4_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/reassign/step4_controller.js b/ambari-web/app/controllers/main/service/reassign/step4_controller.js
index c9cc28f..b383da7 100644
--- a/ambari-web/app/controllers/main/service/reassign/step4_controller.js
+++ b/ambari-web/app/controllers/main/service/reassign/step4_controller.js
@@ -70,267 +70,19 @@ App.ReassignMasterWizardStep4Controller = App.HighAvailabilityProgressPageContro
 
   hostComponents: [],
 
-  /**
-   * List of components, that do not need reconfiguration for moving to another host
-   * Reconfigure command will be skipped
-   */
-  componentsWithoutReconfiguration: ['METRICS_COLLECTOR'],
-
-  /**
-   * Map with lists of related services.
-   * Used to define list of services to stop/start.
-   */
-  relatedServicesMap: {
-    'JOBTRACKER': ['PIG', 'OOZIE'],
-    'RESOURCEMANAGER': ['YARN', 'MAPREDUCE2', 'TEZ', 'PIG', 'OOZIE', 'SLIDER', 'SPARK'],
-    'APP_TIMELINE_SERVER': ['YARN', 'MAPREDUCE2', 'TEZ', 'OOZIE', 'SLIDER', 'SPARK'],
-    'HIVE_SERVER': ['HIVE', 'FALCON', 'ATLAS', 'OOZIE'],
-    'HIVE_METASTORE': ['HIVE', 'PIG', 'FALCON', 'ATLAS', 'OOZIE'],
-    'WEBHCAT_SERVER': ['HIVE'],
-    'OOZIE_SERVER': ['OOZIE', 'FALCON', 'KNOX'],
-    'MYSQL_SERVER': ['HIVE', 'OOZIE', 'RANGER', 'RANGER_KMS'],
-    'METRICS_COLLECTOR': ['AMBARI_METRICS']
-  },
-
   dbPropertyMap: {
-    'HIVE_SERVER': 'javax.jdo.option.ConnectionDriverName',
-    'HIVE_METASTORE': 'javax.jdo.option.ConnectionDriverName',
-    'OOZIE_SERVER': 'oozie.service.JPAService.jdbc.url'
-  },
-
-  /**
-   * additional configs with template values
-   * Part of value to substitute has following format: "<replace-value>"
-   */
-  additionalConfigsMap: [
-    {
-      componentName: 'RESOURCEMANAGER',
-      configs: {
-        'yarn-site': {
-          'yarn.resourcemanager.address': '<replace-value>:8050',
-          'yarn.resourcemanager.admin.address': '<replace-value>:8141',
-          'yarn.resourcemanager.resource-tracker.address': '<replace-value>:8025',
-          'yarn.resourcemanager.scheduler.address': '<replace-value>:8030',
-          'yarn.resourcemanager.webapp.address': '<replace-value>:8088',
-          'yarn.resourcemanager.hostname': '<replace-value>'
-        }
-      }
-    },
-    {
-      componentName: 'JOBTRACKER',
-      configs: {
-        'mapred-site': {
-          'mapred.job.tracker.http.address': '<replace-value>:50030',
-          'mapred.job.tracker': '<replace-value>:50300'
-        }
-      }
-    },
-    {
-      componentName: 'SECONDARY_NAMENODE',
-      configs: {
-        'hdfs-site': {
-          'dfs.secondary.http.address': '<replace-value>:50090'
-        }
-      },
-      configs_Hadoop2: {
-        'hdfs-site': {
-          'dfs.namenode.secondary.http-address': '<replace-value>:50090'
-        }
-      }
-    },
-    {
-      componentName: 'NAMENODE',
-      configs: {
-        'hdfs-site': {
-          'dfs.http.address': '<replace-value>:50070',
-          'dfs.https.address': '<replace-value>:50470'
-        },
-        'core-site': {
-          'fs.default.name': 'hdfs://<replace-value>:8020'
-        }
-      },
-      configs_Hadoop2: {
-        'hdfs-site': {
-          'dfs.namenode.rpc-address': '<replace-value>:8020',
-          'dfs.namenode.http-address': '<replace-value>:50070',
-          'dfs.namenode.https-address': '<replace-value>:50470'
-        },
-        'core-site': {
-          'fs.defaultFS': 'hdfs://<replace-value>:8020'
-        }
-      }
-    },
-    {
-      componentName: 'APP_TIMELINE_SERVER',
-      configs: {
-        'yarn-site': {
-          'yarn.timeline-service.webapp.address': '<replace-value>:8188',
-          'yarn.timeline-service.webapp.https.address': '<replace-value>:8190',
-          'yarn.timeline-service.address': '<replace-value>:10200'
-        }
-      }
-    },
-    {
-      componentName: 'OOZIE_SERVER',
-        configs: {
-          'oozie-site': {
-            'oozie.base.url': 'http://<replace-value>:11000/oozie'
-          },
-          'core-site': {
-            'hadoop.proxyuser.oozie.hosts': '<replace-value>'
-          }
-        }
-    },
-    {
-      componentName: 'HIVE_METASTORE',
-      configs: {
-        'hive-site': {}
-      }
-    },
-    {
-      componentName: 'MYSQL_SERVER',
-      configs: {
-        'hive-site': {
-          'javax.jdo.option.ConnectionURL': 'jdbc:mysql://<replace-value>/hive?createDatabaseIfNotExist=true'
-        }
-      }
-    },
-    {
-      componentName: 'HISTORYSERVER',
-      configs: {
-        'mapred-site': {
-          'mapreduce.jobhistory.webapp.address': '<replace-value>:19888',
-          'mapreduce.jobhistory.address': '<replace-value>:10020'
-        }
-      }
-    }
-  ],
-
-  secureConfigsMap: [
-    {
-      componentName: 'NAMENODE',
-      configs: [
-        {
-          site: 'hdfs-site',
-          keytab: 'dfs.namenode.keytab.file',
-          principal: 'dfs.namenode.kerberos.principal'
-        },
-        {
-          site: 'hdfs-site',
-          keytab: 'dfs.web.authentication.kerberos.keytab',
-          principal: 'dfs.web.authentication.kerberos.principal'
-        }
-      ]
-    },
-    {
-      componentName: 'SECONDARY_NAMENODE',
-      configs: [
-        {
-          site: 'hdfs-site',
-          keytab: 'dfs.secondary.namenode.keytab.file',
-          principal: 'dfs.secondary.namenode.kerberos.principal'
-        },
-        {
-          site: 'hdfs-site',
-          keytab: 'dfs.web.authentication.kerberos.keytab',
-          principal: 'dfs.web.authentication.kerberos.principal'
-        }
-      ]
+    'HIVE_SERVER': {
+      type: 'hive-site',
+      name: 'javax.jdo.option.ConnectionDriverName'
     },
-    {
-      componentName: 'RESOURCEMANAGER',
-      configs: [
-        {
-          site: 'yarn-site',
-          keytab: 'yarn.resourcemanager.keytab',
-          principal: 'yarn.resourcemanager.principal'
-        },
-        {
-          site: 'yarn-site',
-          keytab: 'yarn.resourcemanager.webapp.spnego-keytab-file',
-          principal: 'yarn.resourcemanager.webapp.spnego-principal'
-        }
-      ]
-    },
-    {
-      componentName: 'OOZIE_SERVER',
-      configs: [
-        {
-          site: 'oozie-site',
-          keytab: 'oozie.authentication.kerberos.keytab',
-          principal: 'oozie.authentication.kerberos.principal'
-        },
-        {
-          site: 'oozie-site',
-          keytab: 'oozie.service.HadoopAccessorService.keytab.file',
-          principal: 'oozie.service.HadoopAccessorService.kerberos.principal'
-        }
-      ]
-    },
-    {
-      componentName: 'WEBHCAT_SERVER',
-      configs: [
-        {
-          site: 'webhcat-site',
-          keytab: 'templeton.kerberos.keytab',
-          principal: 'templeton.kerberos.principal'
-        }
-      ]
+    'HIVE_METASTORE': {
+      type: 'hive-site',
+      name: 'javax.jdo.option.ConnectionDriverName'
     },
-    {
-      componentName: 'HIVE_SERVER',
-      configs: [
-        {
-          site: 'hive-site',
-          keytab: 'hive.server2.authentication.kerberos.keytab',
-          principal: 'hive.server2.authentication.kerberos.principal'
-        },
-        {
-          site: 'hive-site',
-          keytab: 'hive.server2.authentication.spnego.keytab',
-          principal: 'hive.server2.authentication.spnego.principal'
-        }
-      ]
-    },
-    {
-      componentName: 'HIVE_METASTORE',
-      configs: [
-        {
-          site: 'hive-site',
-          keytab: 'hive.metastore.kerberos.keytab.file',
-          principal: 'hive.metastore.kerberos.principal'
-        }
-      ]
-    }
-
-  ],
-
-  /**
-   * set additional configs
-   * configs_Hadoop2 - configs which belongs to Hadoop 2 stack only
-   * @param configs
-   * @param componentName
-   * @param replaceValue
-   * @return {Boolean}
-   */
-  setAdditionalConfigs: function (configs, componentName, replaceValue) {
-    var component = this.get('additionalConfigsMap').findProperty('componentName', componentName);
-
-    if (Em.isNone(component)) return false;
-    var additionalConfigs = (component.configs_Hadoop2) ? component.configs_Hadoop2 : component.configs;
-
-    for (var site in additionalConfigs) {
-      if (additionalConfigs.hasOwnProperty(site)) {
-        for (var property in additionalConfigs[site]) {
-          if (additionalConfigs[site].hasOwnProperty(property)) {
-            if (App.get('isHaEnabled') && componentName === 'NAMENODE' && (property === 'fs.defaultFS' || property === 'dfs.namenode.rpc-address')) continue;
-
-            configs[site][property] = additionalConfigs[site][property].replace('<replace-value>', replaceValue);
-          }
-        }
-      }
+    'OOZIE_SERVER': {
+      type: 'oozie-site',
+      name: 'oozie.service.JPAService.jdbc.url'
     }
-    return true;
   },
 
   /**
@@ -397,7 +149,7 @@ App.ReassignMasterWizardStep4Controller = App.HighAvailabilityProgressPageContro
       this.removeTasks(['startZooKeeperServers', 'startNameNode']);
     }
 
-    if (this.get('componentsWithoutReconfiguration').contains(componentName)) {
+    if (!this.get('wizardController.isComponentWithReconfiguration')) {
       this.removeTasks(['reconfigure']);
     }
 
@@ -457,7 +209,7 @@ App.ReassignMasterWizardStep4Controller = App.HighAvailabilityProgressPageContro
    * make server call to stop services
    */
   stopRequiredServices: function () {
-    this.stopServices(this.get('relatedServicesMap')[this.get('content.reassign.component_name')], true);
+    this.stopServices(this.get('wizardController.relatedServicesMap')[this.get('content.reassign.component_name')], true);
   },
 
   createHostComponents: function () {
@@ -502,309 +254,11 @@ App.ReassignMasterWizardStep4Controller = App.HighAvailabilityProgressPageContro
   },
 
   reconfigure: function () {
-    this.loadConfigsTags();
-  },
-
-  loadConfigsTags: function () {
-    App.ajax.send({
-      name: 'config.tags',
-      sender: this,
-      success: 'onLoadConfigsTags',
-      error: 'onTaskError'
-    });
-  },
-
-  serviceToConfigSiteMap: {
-    'NAMENODE': ['hdfs-site', 'core-site'],
-    'SECONDARY_NAMENODE': ['hdfs-site', 'core-site'],
-    'JOBTRACKER': ['mapred-site'],
-    'RESOURCEMANAGER': ['yarn-site'],
-    'WEBHCAT_SERVER': ['hive-env', 'webhcat-site', 'core-site'],
-    'APP_TIMELINE_SERVER': ['yarn-site', 'yarn-env'],
-    'OOZIE_SERVER': ['oozie-site', 'core-site', 'oozie-env'],
-    'HIVE_SERVER': ['hive-site', 'webhcat-site', 'hive-env', 'core-site'],
-    'HIVE_METASTORE': ['hive-site', 'webhcat-site', 'hive-env', 'core-site'],
-    'MYSQL_SERVER': ['hive-site'],
-    'HISTORYSERVER': ['mapred-site']
-  },
-
-  /**
-   * construct URL parameters for config call
-   * @param componentName
-   * @param data
-   * @return {Array}
-   */
-  getConfigUrlParams: function (componentName, data) {
-    var urlParams = [];
-
-    this.get('serviceToConfigSiteMap')[componentName].forEach(function(site){
-      urlParams.push('(type=' + site + '&tag=' + data.Clusters.desired_configs[site].tag + ')');
-    });
-
-    // specific cases for NameNode component
-    if (componentName === 'NAMENODE') {
-        if (App.Service.find().someProperty('serviceName', 'HBASE')) {
-          urlParams.push('(type=hbase-site&tag=' + data.Clusters.desired_configs['hbase-site'].tag + ')');
-        }
-        if (App.Service.find().someProperty('serviceName', 'ACCUMULO')) {
-          urlParams.push('(type=accumulo-site&tag=' + data.Clusters.desired_configs['accumulo-site'].tag + ')');
-        }
-        if (App.Service.find().someProperty('serviceName', 'HAWQ')) {
-          urlParams.push('(type=hawq-site&tag=' + data.Clusters.desired_configs['hawq-site'].tag + ')');
-          urlParams.push('(type=hdfs-client&tag=' + data.Clusters.desired_configs['hdfs-client'].tag + ')');
-        }
-    }
-
-    if (componentName === 'RESOURCEMANAGER') {
-        if (App.Service.find().someProperty('serviceName', 'HAWQ')) {
-          urlParams.push('(type=hawq-site&tag=' + data.Clusters.desired_configs['hawq-site'].tag + ')');
-          urlParams.push('(type=yarn-client&tag=' + data.Clusters.desired_configs['yarn-client'].tag + ')');
-        }
-    }
-
-    return urlParams;
-  },
-
-  onLoadConfigsTags: function (data) {
-    var urlParams = this.getConfigUrlParams(this.get('content.reassign.component_name'), data);
-
-    App.ajax.send({
-      name: 'reassign.load_configs',
-      sender: this,
-      data: {
-        urlParams: urlParams.join('|')
-      },
-      success: 'onLoadConfigs',
-      error: 'onTaskError'
-    });
-  },
-
-  /**
-   *
-   * @returns {extendedTopologyLocalDB}
-   * @private
-   * @method _prepareTopologyDB
-   */
-  _prepareTopologyDB: function () {
-    var ret = this.get('content').getProperties(['masterComponentHosts', 'slaveComponentHosts', 'hosts']);
-    ret.installedServices = App.Service.find().mapProperty('serviceName');
-    return ret;
-  },
-
-  /**
-   * Create dependencies for Config Initializers
-   *
-   * @param {object} additionalDependencies  some additional information that should be added
-   * @returns {reassignComponentDependencies}
-   * @private
-   * @method _prepareDependencies
-   */
-  _prepareDependencies: function (additionalDependencies) {
-    additionalDependencies = additionalDependencies || {};
-    var ret = {};
-    ret.sourceHostName = this.get('content.reassignHosts.source');
-    ret.targetHostName = this.get('content.reassignHosts.target');
-    return Em.merge(ret, additionalDependencies);
-  },
-
-  /**
-   * Get additional dependencies-data for App.MoveRmConfigInitializer
-   *
-   * @param {object} configs
-   * @returns {object}
-   * @private
-   * @method _getRmAdditionalDependencies
-   */
-  _getRmAdditionalDependencies: function (configs) {
-    var ret = {};
-    var rm1 = configs['yarn-site']['yarn.resourcemanager.hostname.rm1'];
-    if (rm1) {
-      ret.rm1 = rm1;
-    }
-    var rm2 = configs['yarn-site']['yarn.resourcemanager.hostname.rm2'];
-    if (rm2) {
-      ret.rm2 = rm2;
-    }
-    return ret;
-  },
-
-  /**
-   * Settings used to the App.MoveOSConfigInitializer setup
-   *
-   * @param {object} configs
-   * @returns {object}
-   * @private
-   * @method _getOsInitializerSettings
-   */
-  _getOsInitializerSettings: function (configs) {
-    var ret = {};
-    var cfg = configs['oozie-env']['oozie_user'];
-    if (cfg) {
-      ret.oozieUser = cfg;
-    }
-    return ret;
-  },
-
-  /**
-   * Get additional dependencies-data for App.MoveNameNodeConfigInitializer
-   *
-   * @param {object} configs
-   * @returns {object}
-   * @private
-   * @method _getNnInitializerSettings
-   */
-  _getNnInitializerSettings: function (configs) {
-    var ret = {};
-    if (App.get('isHaEnabled')) {
-      ret.namespaceId = configs['hdfs-site']['dfs.nameservices'];
-      ret.suffix = (configs['hdfs-site']['dfs.namenode.http-address.' + ret.namespaceId + '.nn1'].indexOf(this.get('content.reassignHosts.source')) != -1) ? 'nn1' : 'nn2';
-    }
-    return ret;
-  },
-
-  /**
-   * Settings used to the App.MoveHsConfigInitializer and App.MoveHmConfigInitializer setup
-   *
-   * @param {object} configs
-   * @returns {{hiveUser: string}}
-   * @private
-   * @method _getHiveInitializerSettings
-   */
-  _getHiveInitializerSettings: function (configs) {
-    return {
-      hiveUser: configs['hive-env']['hive_user']
-    };
-  },
-
-  /**
-   * Settings used to the App.MoveWsConfigInitializer setup
-   *
-   * @param {object} configs
-   * @returns {{webhcatUser: string}}
-   * @private
-   * @method _getWsInitializerSettings
-   */
-  _getWsInitializerSettings: function (configs) {
-    return {
-      webhcatUser: configs['hive-env']['webhcat_user']
-    };
-  },
-
-  /**
-   * Settings used to the App.MoveRmConfigInitializer setup
-   *
-   * @param {object} configs
-   * @returns {{suffix: string}}
-   * @private
-   * @method _getRmInitializerSettings
-   */
-  _getRmInitializerSettings: function (configs) {
-    return {
-      suffix: configs['yarn-site']['yarn.resourcemanager.hostname.rm1'] === this.get('content.reassignHosts.source') ? 'rm1': 'rm2'
-    };
-  },
-
-  onLoadConfigs: function (data) {
-    // Find hawq-site.xml location
-    var hawqSiteIndex = -1;
-    for(var i = 0; i < data.items.length; i++){
-      if(data.items[i].type == 'hawq-site'){
-        hawqSiteIndex = i;
-        break;
-      }
-    }
-
-    // if certain services are deployed, include related site files to additionalConfigsMap and relatedServicesMap.
-    if(hawqSiteIndex >= 0){ // if HAWQ is deployed
-      var hawqSiteProperties = {
-        'hawq_rm_yarn_address': '<replace-value>:8050',
-        'hawq_rm_yarn_scheduler_address': '<replace-value>:8030'
-      }
-
-      var rmComponent = this.get('additionalConfigsMap').findProperty('componentName', "RESOURCEMANAGER");
-      rmComponent.configs["hawq-site"] = hawqSiteProperties;
-
-      if(data.items[hawqSiteIndex].properties["hawq_global_rm_type"].toLowerCase() === "yarn"){
-        this.get('relatedServicesMap')['RESOURCEMANAGER'].append('HAWQ');
-      }
-
-    }
-
-    var componentName = this.get('content.reassign.component_name');
-    var targetHostName = this.get('content.reassignHosts.target');
-    var configs = {};
-    var secureConfigs = [];
-
-    data.items.forEach(function (item) {
-      configs[item.type] = item.properties;
-    }, this);
-
-    this.setAdditionalConfigs(configs, componentName, targetHostName);
-    this.setSecureConfigs(secureConfigs, configs, componentName);
-
-    switch (componentName) {
-      case 'NAMENODE':
-        App.MoveNameNodeConfigInitializer.setup(this._getNnInitializerSettings(configs));
-        configs = this.setDynamicConfigs(configs, App.MoveNameNodeConfigInitializer);
-        App.MoveNameNodeConfigInitializer.cleanup();
-        break;
-      case 'RESOURCEMANAGER':
-        App.MoveRmConfigInitializer.setup(this._getRmInitializerSettings(configs));
-        var additionalDependencies = this._getRmAdditionalDependencies(configs);
-        configs = this.setDynamicConfigs(configs, App.MoveRmConfigInitializer, additionalDependencies);
-        App.MoveRmConfigInitializer.cleanup();
-        break;
-      case 'HIVE_METASTORE':
-        App.MoveHmConfigInitializer.setup(this._getHiveInitializerSettings(configs));
-        configs = this.setDynamicConfigs(configs, App.MoveHmConfigInitializer);
-        App.MoveHmConfigInitializer.cleanup();
-        break;
-      case 'HIVE_SERVER':
-        App.MoveHsConfigInitializer.setup(this._getHiveInitializerSettings(configs));
-        configs = this.setDynamicConfigs(configs, App.MoveHsConfigInitializer);
-        App.MoveHsConfigInitializer.cleanup();
-        break;
-      case 'WEBHCAT_SERVER':
-        App.MoveWsConfigInitializer.setup(this._getWsInitializerSettings(configs));
-        configs = this.setDynamicConfigs(configs, App.MoveWsConfigInitializer);
-        App.MoveWsConfigInitializer.cleanup();
-        break;
-      case 'OOZIE_SERVER':
-        App.MoveOSConfigInitializer.setup(this._getOsInitializerSettings(configs));
-        configs = this.setDynamicConfigs(configs, App.MoveOSConfigInitializer);
-        App.MoveOSConfigInitializer.cleanup();
-    }
-
+    var configs = this.get('content.configs'),
+      secureConfigs = this.get('content.secureConfigs'),
+      componentName = this.get('content.reassign.component_name');
     this.saveClusterStatus(secureConfigs, this.getComponentDir(configs, componentName));
     this.saveConfigsToServer(configs);
-    this.saveServiceProperties(configs);
-  },
-
-  /**
-   * Set config values according to the new cluster topology
-   *
-   * @param {object} configs
-   * @param {MoveComponentConfigInitializerClass} initializer
-   * @param {object} [additionalDependencies={}]
-   * @returns {object}
-   * @method setDynamicConfigs
-   */
-  setDynamicConfigs: function (configs, initializer, additionalDependencies) {
-    additionalDependencies = additionalDependencies || {};
-    var topologyDB = this._prepareTopologyDB();
-    var dependencies = this._prepareDependencies(additionalDependencies);
-    Em.keys(configs).forEach(function (site) {
-      Em.keys(configs[site]).forEach(function (config) {
-        // temporary object for initializer
-        var cfg = {
-          name: config,
-          filename: site,
-          value: configs[site][config]
-        };
-        configs[site][config] = initializer.initialValue(cfg, topologyDB, dependencies).value;
-      });
-    });
-    return configs;
   },
 
   /**
@@ -884,27 +338,6 @@ App.ReassignMasterWizardStep4Controller = App.HighAvailabilityProgressPageContro
   },
 
   /**
-   * set secure configs for component
-   * @param secureConfigs
-   * @param configs
-   * @param componentName
-   * @return {Boolean}
-   */
-  setSecureConfigs: function (secureConfigs, configs, componentName) {
-    var securityEnabled = App.get('isKerberosEnabled');
-    var component = this.get('secureConfigsMap').findProperty('componentName', componentName);
-    if (Em.isNone(component) || !securityEnabled) return false;
-
-    component.configs.forEach(function (config) {
-      secureConfigs.push({
-        keytab: configs[config.site][config.keytab],
-        principal: configs[config.site][config.principal]
-      });
-    });
-    return true;
-  },
-
-  /**
    * derive component directory from configurations
    * @param configs
    * @param componentName
@@ -966,7 +399,7 @@ App.ReassignMasterWizardStep4Controller = App.HighAvailabilityProgressPageContro
    * make server call to start services
    */
   startRequiredServices: function () {
-    var relatedServices = this.get('relatedServicesMap')[this.get('content.reassign.component_name')];
+    var relatedServices = this.get('wizardController.relatedServicesMap')[this.get('content.reassign.component_name')];
     if (relatedServices) {
       this.startServices(false, relatedServices, true);
     } else {
@@ -1150,10 +583,11 @@ App.ReassignMasterWizardStep4Controller = App.HighAvailabilityProgressPageContro
   }.property('propertiesPattern'),
 
   getConnectionProperty: function(regexp) {
-    var propertyName = this.get('requiredProperties').filter(function(item) {
+    var configType = this.get('requiredProperties.type'),
+      propertyName = this.get('requiredProperties.names').filter(function(item) {
       return regexp.test(item);
     })[0];
-    return this.get('content.serviceProperties')[propertyName];
+    return Em.getWithDefault(this.get('content.configs'), configType, {})[propertyName];
   },
 
   /**
@@ -1174,18 +608,35 @@ App.ReassignMasterWizardStep4Controller = App.HighAvailabilityProgressPageContro
   /** @property {object} requiredProperties - properties that necessary for database connection **/
   requiredProperties: function() {
     var propertiesMap = {
-      OOZIE: ['oozie.db.schema.name','oozie.service.JPAService.jdbc.username','oozie.service.JPAService.jdbc.password','oozie.service.JPAService.jdbc.driver','oozie.service.JPAService.jdbc.url'],
-      HIVE: ['ambari.hive.db.schema.name','javax.jdo.option.ConnectionUserName','javax.jdo.option.ConnectionPassword','javax.jdo.option.ConnectionDriverName','javax.jdo.option.ConnectionURL']
+      OOZIE: {
+        type: 'oozie-site',
+        names: ['oozie.db.schema.name', 'oozie.service.JPAService.jdbc.username', 'oozie.service.JPAService.jdbc.password', 'oozie.service.JPAService.jdbc.driver', 'oozie.service.JPAService.jdbc.url']
+      },
+      HIVE: {
+        type: 'hive-site',
+        names: ['ambari.hive.db.schema.name', 'javax.jdo.option.ConnectionUserName', 'javax.jdo.option.ConnectionPassword', 'javax.jdo.option.ConnectionDriverName', 'javax.jdo.option.ConnectionURL']
+      }
     };
 
     return propertiesMap[this.get('content.reassign.service_id')];
   }.property(),
 
   dbType: function() {
-    var databaseTypes = /MySQL|PostgreS|Oracle|Derby|MSSQL|Anywhere/gi;
-    var databaseProp = this.get('content.serviceProperties')[Em.getWithDefault(this.get('dbPropertyMap'), this.get('content.reassign.component_name'), null)];
+    var databaseTypes = /MySQL|PostgreS|Oracle|Derby|MSSQL|Anywhere/gi,
+      dbPropertyMapItem = Em.getWithDefault(this.get('dbPropertyMap'), this.get('content.reassign.component_name'), null),
+      databasePropMatch,
+      databaseProp,
+      result;
+
+    if (dbPropertyMapItem) {
+      databaseProp = Em.getWithDefault(this.get('content.configs'), dbPropertyMapItem.type, {})[dbPropertyMapItem.name];
+      databasePropMatch = databaseProp && databaseProp.match(databaseTypes);
+      if (databasePropMatch) {
+        result = databasePropMatch[0];
+      }
+    }
 
-    return databaseProp.match(databaseTypes)[0];
+    return result;
   }.property(),
 
   prepareDBCheckAction: function() {

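The hunk above changes where connection properties come from: instead of the flat content.serviceProperties bag, each lookup now goes through the config site named in requiredProperties.type. A minimal standalone sketch of that lookup, with sample values that are purely illustrative and plain property access standing in for Em.getWithDefault:

    var content = {
      configs: {
        'hive-site': {
          'javax.jdo.option.ConnectionUserName': 'hive',
          'javax.jdo.option.ConnectionURL': 'jdbc:mysql://c6401/hive'
        }
      }
    };
    var requiredProperties = {
      type: 'hive-site',
      names: ['ambari.hive.db.schema.name', 'javax.jdo.option.ConnectionUserName', 'javax.jdo.option.ConnectionURL']
    };

    function getConnectionProperty(regexp) {
      // first required property name matching the pattern...
      var propertyName = requiredProperties.names.filter(function (item) {
        return regexp.test(item);
      })[0];
      // ...resolved inside the named config site, not a flat property bag
      return (content.configs[requiredProperties.type] || {})[propertyName];
    }

    // getConnectionProperty(/user/i) -> 'hive'
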
http://git-wip-us.apache.org/repos/asf/ambari/blob/cba69d93/ambari-web/app/controllers/main/service/reassign_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/reassign_controller.js b/ambari-web/app/controllers/main/service/reassign_controller.js
index 07d6e2c..e512835 100644
--- a/ambari-web/app/controllers/main/service/reassign_controller.js
+++ b/ambari-web/app/controllers/main/service/reassign_controller.js
@@ -131,6 +131,8 @@ App.ReassignMasterController = App.WizardController.extend({
           this.loadTasksRequestIds();
           this.loadRequestIds();
           this.loadReassignComponentsInMM();
+          this.loadConfigs();
+          this.loadSecureConfigs();
         }
       }
     ],
@@ -138,13 +140,46 @@ App.ReassignMasterController = App.WizardController.extend({
       {
         type: 'sync',
         callback: function () {
-          this.loadSecureConfigs();
           this.loadComponentDir();
         }
       }
     ]
   },
 
+  serviceToConfigSiteMap: {
+    'NAMENODE': ['hdfs-site', 'core-site'],
+    'SECONDARY_NAMENODE': ['hdfs-site', 'core-site'],
+    'JOBTRACKER': ['mapred-site'],
+    'RESOURCEMANAGER': ['yarn-site'],
+    'WEBHCAT_SERVER': ['hive-env', 'webhcat-site', 'core-site'],
+    'APP_TIMELINE_SERVER': ['yarn-site', 'yarn-env'],
+    'OOZIE_SERVER': ['oozie-site', 'core-site', 'oozie-env'],
+    'HIVE_SERVER': ['hive-site', 'webhcat-site', 'hive-env', 'core-site'],
+    'HIVE_METASTORE': ['hive-site', 'webhcat-site', 'hive-env', 'core-site'],
+    'MYSQL_SERVER': ['hive-site'],
+    'HISTORYSERVER': ['mapred-site']
+  },
+
+  /**
+   * Map of component names to lists of related services.
+   * Used to determine which services to stop/start around the reassignment.
+   */
+  relatedServicesMap: {
+    'JOBTRACKER': ['PIG', 'OOZIE'],
+    'RESOURCEMANAGER': ['YARN', 'MAPREDUCE2', 'TEZ', 'PIG', 'OOZIE', 'SLIDER', 'SPARK'],
+    'APP_TIMELINE_SERVER': ['YARN', 'MAPREDUCE2', 'TEZ', 'OOZIE', 'SLIDER', 'SPARK'],
+    'HIVE_SERVER': ['HIVE', 'FALCON', 'ATLAS', 'OOZIE'],
+    'HIVE_METASTORE': ['HIVE', 'PIG', 'FALCON', 'ATLAS', 'OOZIE'],
+    'WEBHCAT_SERVER': ['HIVE'],
+    'OOZIE_SERVER': ['OOZIE', 'FALCON', 'KNOX'],
+    'MYSQL_SERVER': ['HIVE', 'OOZIE', 'RANGER', 'RANGER_KMS'],
+    'METRICS_COLLECTOR': ['AMBARI_METRICS']
+  },
+
+  isComponentWithReconfiguration: function () {
+    return this.get('serviceToConfigSiteMap').hasOwnProperty(this.get('content.reassign.component_name'));
+  }.property('content.reassign.component_name'),
+
   addManualSteps: function () {
     var hasManualSteps = this.get('content.componentsWithManualCommands').contains(this.get('content.reassign.component_name'));
     this.set('content.hasManualSteps', hasManualSteps);
@@ -285,6 +320,16 @@ App.ReassignMasterController = App.WizardController.extend({
     this.set('content.serviceProperties', serviceProperties);
   },
 
+  saveConfigs: function (configs) {
+    this.setDBProperty('configs', configs);
+    this.set('content.configs', configs);
+  },
+
+  loadConfigs: function () {
+    var configs = this.getDBProperty('configs');
+    this.set('content.configs', configs);
+  },
+
   saveDatabaseType: function (type) {
     this.setDBProperty('databaseType', type);
     this.set('content.databaseType', type);

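saveConfigs/loadConfigs above follow the wizard's usual dual-write idiom: persist through setDBProperty so the value survives a browser refresh, and mirror it onto content so running steps can bind to it. A rough sketch of the idiom under the assumption of a localStorage backing (the real wizard uses its own DB plumbing; the names below are illustrative):

    function makeWizardStore(namespace) {
      return {
        setDBProperty: function (key, value) {
          window.localStorage.setItem(namespace + '.' + key, JSON.stringify(value));
        },
        getDBProperty: function (key) {
          var raw = window.localStorage.getItem(namespace + '.' + key);
          return raw === null ? undefined : JSON.parse(raw);
        }
      };
    }

    var db = makeWizardStore('ReassignMaster');
    var content = {};

    function saveConfigs(configs) {
      db.setDBProperty('configs', configs); // durable copy, survives refresh
      content.configs = configs;            // live copy the steps bind to
    }

    function loadConfigs() {
      content.configs = db.getDBProperty('configs');
    }
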
http://git-wip-us.apache.org/repos/asf/ambari/blob/cba69d93/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 7ed5826..cacb798 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -1316,7 +1316,8 @@ Em.I18n.translations = {
   'admin.manageJournalNode.wizard.step8.header': 'Start All Services',
 
   'admin.manageJournalNode.wizard.step1.body': 'Add, or Remove JournalNodes',
-  'admin.manageJournalNode.wizard.step3.confirm.config.body': '<b>Configuration Change Review.</b></br>' +
+  'admin.manageJournalNode.wizard.step3.confirm.hosts.body': '<b>Confirm your host selections.</b>',
+  'admin.manageJournalNode.wizard.step3.confirm.config.body': '<p><b>Review Configuration Changes.</b></p>' +
   'As part of this process, configuration changes are required. Please review the changes below, and note that they are for <b>review only</b>.  Future steps in this wizard will update this configuration, and restart <b>all</b> services automatically.',
 
   'admin.manageJournalNode.wizard.step4.task0.title' : 'Stop Standby NameNode',
@@ -1541,7 +1542,7 @@ Em.I18n.translations = {
     '</ol>',
   'admin.highAvailability.wizard.step3.confirm.host.body':'<b>Confirm your host selections.</b>',
   'admin.highAvailability.wizard.step3.confirm.config.body':'<div class="alert alert-info">' +
-    '<b>Review Configuration Changes.</b></br>' +
+    '<p><b>Review Configuration Changes.</b></p>' +
     'The following lists the configuration changes that will be made by the Wizard to enable NameNode HA. This information is for <b> review only </b> and is not editable except for the  <b>dfs.journalnode.edits.dir</b> property' +
     '</div>',
   'admin.highAvailability.wizard.step2.body':'Select a host that will be running the additional NameNode.<br/> In addition,' +
@@ -1568,7 +1569,7 @@ Em.I18n.translations = {
   'admin.rm_highAvailability.wizard.step3.header': 'Review',
   'admin.rm_highAvailability.wizard.step3.confirm.host.body':'<b>Confirm your host selections.</b>',
   'admin.rm_highAvailability.wizard.step3.confirm.config.body':'<div class="alert alert-info">' +
-      '<b>Review Configuration Changes.</b></br>' +
+      '<p><b>Review Configuration Changes.</b></p>' +
       'The following lists the configuration changes that will be made by the Wizard to enable ResourceManager HA. This information is for <b> review only </b> and is not editable.' +
       '</div>',
   'admin.rm_highAvailability.wizard.step3.currentRM': 'Current ResourceManager',
@@ -1600,7 +1601,7 @@ Em.I18n.translations = {
   'admin.ra_highAvailability.wizard.step3.alert_message': '<b>Confirm your host selections.</b>',
   'admin.ra_highAvailability.wizard.step3.currentRA': 'Current Ranger Admin',
   'admin.ra_highAvailability.wizard.step3.additionalRA': 'Additional Ranger Admin',
-  'admin.rm_highAvailability.wizard.step3.configs_changes': '<b>Review Configuration Changes.</b></br>' +
+  'admin.rm_highAvailability.wizard.step3.configs_changes': '<p><b>Review Configuration Changes.</b></p>' +
   '<i>policymgr_external_url</i> in admin-properties.xml will be changed by the Wizard to enable Ranger Admin HA',
   'admin.ra_highAvailability.wizard.step4.header': 'Install, Start and Test',
   'admin.ra_highAvailability.wizard.step4.task0.title': 'Stop All Services',
@@ -2284,10 +2285,12 @@ Em.I18n.translations = {
   'services.reassign.step2.body':'Assign {0} to new host.',
   'services.reassign.step2.body.namenodeHA':'Move {0} to new host. You can move only one master component at a time.',
   'services.reassign.step3.header':'Review',
-  'services.reassign.step3.body':'Please review the changes you made',
+  'services.reassign.step3.body':'<b>Confirm your host selections.</b>',
   'services.reassign.step3.targetHost':'Target Host:',
   'services.reassign.step3.sourceHost':'Source Host:',
   'services.reassign.step3.component':'Component name:',
+  'services.reassign.step3.configs':'<div class="alert alert-info">' +
+    '<p><b>Review Configuration Changes.</b></p>The Wizard will make the following configuration changes.</div>',
   'services.reassign.step4.header':'Configure Component',
 
   'services.reassign.step4.tasks.stopRequiredServices.title':'Stop Required Services',
@@ -3245,8 +3248,8 @@ Em.I18n.translations = {
   'admin.addHawqStandby.wizard.step3.header': 'Review',
   'admin.addHawqStandby.wizard.step3.configs_changes': 'Review Configuration Changes.',
   'admin.addHawqStandby.wizard.step3.confirm.host.body':'<b>Confirm your host selections.</b>',
-  'admin.addHawqStandby.wizard.step3.confirm.config.body':'<div class="alert alert-info">' +
-      '<b>Review Configuration Changes.</b><br/><br/>' +
+  'admin.addHawqStandby.wizard.step3.confirm.config.body':'<div class="alert alert-info">' +
+      '<p><b>Review Configuration Changes.</b></p>' +
       'The following lists the configuration changes that will be made by the Wizard to add HAWQ Standby Master. ' +
       'This information is for <b> review only </b> and is not editable.</div>',
   'admin.addHawqStandby.wizard.step3.hawqMaster': 'Current HAWQ Master',
@@ -3277,7 +3280,7 @@ Em.I18n.translations = {
   'admin.removeHawqStandby.wizard.step2.header': 'Review',
   'admin.removeHawqStandby.wizard.step2.hawqStandby': '<b>Current HAWQ Standby:</b>',
   'admin.removeHawqStandby.wizard.step2.confirm.config.body':'<div class="alert alert-info">' +
-      '<b>Review Configuration Changes.</b></br></br>After removing the HAWQ Standby Master, the Wizard removes the ' +
+      '<p><b>Review Configuration Changes.</b></p>After removing the HAWQ Standby Master, the Wizard removes the ' +
       'hawq_standby_address_host property from hawq-site.xml. As a best practice, you should configure a new HAWQ Standby Master host after the Wizard completes.</div>',
   'admin.removeHawqStandby.wizard.step2.confirm.host.body':'<b>Review HAWQ Standby Master role changes.</b>',
   'admin.removeHawqStandby.wizard.step2.confirmPopup.body': 'Do you wish to continue with removing HAWQ Standby Master? Please confirm, before proceeding as you will not be able to rollback from Ambari.',
@@ -3312,7 +3315,7 @@ Em.I18n.translations = {
   'admin.activateHawqStandby.wizard.step2.toBeActivated': 'TO BE ACTIVATED AS NEW HAWQ MASTER',
   'admin.activateHawqStandby.step4.save.configuration.note': 'This configuration is created by Activate HAWQ Standby wizard',
   'admin.activateHawqStandby.wizard.step2.confirm.config.body': '<div class="alert alert-info">' +
-      '<b>Review Configuration Changes.</b><br/><br/>The Wizard will make the following configuration changes. '+
+      '<p><b>Review Configuration Changes.</b></p>The Wizard will make the following configuration changes. '+
       'This information is for review only, and cannot be edited.<br/><br/><b>After activating the HAWQ Standby ' +
       'Master, the wizard removes the hawq_standby_address_host property from hawq-site.xml.</b> ' +
       'As a best practice, you should configure a new HAWQ Standby Master host after the wizard completes.</div>',

http://git-wip-us.apache.org/repos/asf/ambari/blob/cba69d93/ambari-web/app/routes/reassign_master_routes.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/routes/reassign_master_routes.js b/ambari-web/app/routes/reassign_master_routes.js
index 10ad0e9..a2a9743 100644
--- a/ambari-web/app/routes/reassign_master_routes.js
+++ b/ambari-web/app/routes/reassign_master_routes.js
@@ -170,16 +170,21 @@ module.exports = App.WizardRoute.extend({
   step3: Em.Route.extend({
     route: '/step3',
     connectOutlets: function (router) {
-      var controller = router.get('reassignMasterController');
+      var controller = router.get('reassignMasterController'),
+        stepController = router.get('reassignMasterWizardStep3Controller');
       controller.setCurrentStep('3');
       controller.dataLoading().done(function () {
         controller.loadAllPriorSteps();
+        stepController.set('wizardController', controller);
         controller.connectOutlet('reassignMasterWizardStep3', controller.get('content'));
       })
     },
     back: Em.Router.transitionTo('step2'),
     next: function (router) {
-      var controller = router.get('reassignMasterController');
+      var controller = router.get('reassignMasterController'),
+        stepController = router.get('reassignMasterWizardStep3Controller'),
+        configs = stepController.get('configs'),
+        secureConfigs = stepController.get('secureConfigs');
       App.db.setReassignTasksStatuses(undefined);
       App.db.setReassignTasksRequestIds(undefined);
       App.clusterStatus.setClusterStatus({
@@ -189,9 +194,16 @@ module.exports = App.WizardRoute.extend({
         localdb: App.db.data
       });
       controller.saveReassignComponentsInMM(controller.getReassignComponentsInMM());
+      stepController.updateServiceConfigs();
+      controller.saveConfigs(configs);
+      controller.saveSecureConfigs(secureConfigs);
       router.transitionTo('step4');
     },
 
+    exit: function (router) {
+      router.get('reassignMasterWizardStep3Controller').clearStep();
+    },
+
     unroutePath: function () {
       return false;
     }
@@ -200,11 +212,13 @@ module.exports = App.WizardRoute.extend({
   step4: Em.Route.extend({
     route: '/step4',
     connectOutlets: function (router) {
-      var controller = router.get('reassignMasterController');
+      var controller = router.get('reassignMasterController'),
+        stepController = router.get('reassignMasterWizardStep4Controller');
       controller.setCurrentStep('4');
       controller.setLowerStepsDisable(4);
       router.get('mainController').isLoading.call(router.get('clusterController'), 'isServiceContentFullyLoaded').done(function () {
         controller.loadAllPriorSteps();
+        stepController.set('wizardController', controller);
         controller.connectOutlet('reassignMasterWizardStep4', controller.get('content'));
       });
     },
@@ -304,11 +318,13 @@ module.exports = App.WizardRoute.extend({
   step7: Em.Route.extend({
     route: '/step7',
     connectOutlets: function (router) {
-      var controller = router.get('reassignMasterController');
+      var controller = router.get('reassignMasterController'),
+        stepController = router.get('reassignMasterWizardStep7Controller');
       controller.setCurrentStep('7');
       controller.setLowerStepsDisable(7);
       controller.dataLoading().done(function () {
         controller.loadAllPriorSteps();
+        stepController.set('wizardController', controller);
         controller.connectOutlet('reassignMasterWizardStep7', controller.get('content'));
       });
     },

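A pattern repeated across these route changes: each step controller receives a wizardController back-reference before connectOutlet runs, so step-level code can read the maps that now live on the wizard controller (relatedServicesMap, serviceToConfigSiteMap). A hypothetical consumer, not part of this commit, showing why the wiring order matters:

    App.ReassignMasterWizardStep4Controller = App.HighAvailabilityProgressPageController.extend({
      relatedServices: function () {
        var componentName = this.get('content.reassign.component_name');
        // resolves only because the route ran
        // stepController.set('wizardController', controller) before rendering
        return this.get('wizardController.relatedServicesMap')[componentName];
      }.property('content.reassign.component_name')
    });
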
http://git-wip-us.apache.org/repos/asf/ambari/blob/cba69d93/ambari-web/app/styles/wizard.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/wizard.less b/ambari-web/app/styles/wizard.less
index 2dbdd03..579b21b 100644
--- a/ambari-web/app/styles/wizard.less
+++ b/ambari-web/app/styles/wizard.less
@@ -377,7 +377,7 @@
     margin-top: 8px;
   }
 
-  #ha-step3-review-table, #manage-journal-node-step2-review-table {
+  #ha-step3-review-table, #manage-journal-node-step2-review-table, #reassign-review-table {
     td {
       text-align: left;
       vertical-align: top;

http://git-wip-us.apache.org/repos/asf/ambari/blob/cba69d93/ambari-web/app/templates/main/admin/highAvailability/journalNode/step2.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/highAvailability/journalNode/step2.hbs b/ambari-web/app/templates/main/admin/highAvailability/journalNode/step2.hbs
index 0cd177d..d537585 100644
--- a/ambari-web/app/templates/main/admin/highAvailability/journalNode/step2.hbs
+++ b/ambari-web/app/templates/main/admin/highAvailability/journalNode/step2.hbs
@@ -18,63 +18,66 @@
 
 <div class="wizard-content col-md-9">
   <h4 class="step-title">{{t admin.manageJournalNode.wizard.step2.header}}</h4>
+  <p class="step-description">
+    {{t admin.manageJournalNode.wizard.step3.confirm.hosts.body}}
+  </p>
   <div class="panel panel-default">
     <div class="panel-body">
-    <div id="manage-journal-node-step2-content" class="well pre-scrollable">
-      <div id="step8-info">
-        <table id="manage-journal-node-step2-review-table">
+      <div id="manage-journal-node-step2-content" class="well pre-scrollable">
+        <div id="step8-info">
+          <table id="manage-journal-node-step2-review-table">
             {{#if view.journalNodesToAdd.length}}
-            <tr>
+              <tr>
                 <td>{{t admin.highAvailability.wizard.step3.journalNode}}</td>
                 <td>
-                    <ul>
-                        {{#each item in view.journalNodesToAdd}}
-                            <li>{{item}}</li>
-                        {{/each}}
-                    </ul>
+                  <ul>
+                    {{#each item in view.journalNodesToAdd}}
+                      <li>{{item}}</li>
+                    {{/each}}
+                  </ul>
                 </td>
                 <td>
-                    <ul>
-                        {{#each item in view.journalNodesToAdd}}
-                            <li><span class="to-be-installed-green"><i class="icon-plus"></i>
+                  <ul>
+                    {{#each item in view.journalNodesToAdd}}
+                      <li><span class="to-be-installed-green"><i class="icon-plus"></i>
                                 &nbsp;{{t admin.highAvailability.wizard.step3.toBeInstalled}}</span></li>
-                        {{/each}}
-                    </ul>
+                    {{/each}}
+                  </ul>
                 </td>
-            </tr>
+              </tr>
             {{/if}}
 
             {{#if view.journalNodesToDelete.length}}
-            <tr>
+              <tr>
                 <td>{{t admin.highAvailability.wizard.step3.journalNode}}</td>
                 <td>
-                    <ul>
-                        {{#each item in view.journalNodesToDelete}}
-                            <li>{{item}}</li>
-                        {{/each}}
-                    </ul>
+                  <ul>
+                    {{#each item in view.journalNodesToDelete}}
+                      <li>{{item}}</li>
+                    {{/each}}
+                  </ul>
                 </td>
                 <td>
-                    <ul>
-                        {{#each item in view.journalNodesToDelete}}
-                            <li><span class="to-be-disabled-red"><i class="icon-minus"></i>
+                  <ul>
+                    {{#each item in view.journalNodesToDelete}}
+                      <li><span class="to-be-disabled-red"><i class="icon-minus"></i>
                                 &nbsp;{{t admin.highAvailability.wizard.step3.toBeDeleted}}</span></li>
-                        {{/each}}
-                    </ul>
+                    {{/each}}
+                  </ul>
                 </td>
-            </tr>
+              </tr>
             {{/if}}
-        </table>
+          </table>
+        </div>
       </div>
-    </div>
       <div id="serviceConfig">
         {{#if controller.isLoaded}}
           <div class="alert alert-info">
             {{{t admin.manageJournalNode.wizard.step3.confirm.config.body}}}
           </div>
-            {{view App.ServiceConfigView isNotEditableBinding="controller.isNotEditable"}}
+          {{view App.ServiceConfigView isNotEditableBinding="controller.isNotEditable"}}
         {{else}}
-            {{view App.SpinnerView}}
+          {{view App.SpinnerView}}
         {{/if}}
       </div>
     </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/cba69d93/ambari-web/app/templates/main/service/reassign/step3.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/reassign/step3.hbs b/ambari-web/app/templates/main/service/reassign/step3.hbs
index 1f88fb5..1f6a393 100644
--- a/ambari-web/app/templates/main/service/reassign/step3.hbs
+++ b/ambari-web/app/templates/main/service/reassign/step3.hbs
@@ -28,17 +28,37 @@
   <div class="panel panel-default">
     <div class="panel-body">
       <div id="step8-content" class="well pre-scrollable">
-        <div id="printReview">
-          <a class="btn btn-info pull-right" {{action printReview target="view"}}>{{t common.print}}</a> <br/>
-        </div>
         <div id="step8-info">
-          <p><b>{{t services.reassign.step3.component}}</b> {{controller.content.reassign.display_name}}</p>
-
-          <p><b>{{t services.reassign.step3.sourceHost}}</b> {{view.sourceHost}}</p>
-
-          <p><b>{{t services.reassign.step3.targetHost}}</b> {{view.targetHost}}</p>
+          <table id="reassign-review-table">
+            <tr>
+              <td><b>{{t services.reassign.step3.component}}</b></td>
+              <td colspan="2">{{controller.content.reassign.display_name}}</td>
+            </tr>
+            <tr>
+              <td><b>{{t services.reassign.step3.sourceHost}}</b></td>
+              <td>{{view.sourceHost}}</td>
+              <td><span class="to-be-disabled-red"><i class="glyphicon glyphicon-minus"></i>&nbsp;{{t admin.highAvailability.wizard.step3.toBeDeleted}}</span></td>
+            </tr>
+            <tr>
+              <td><b>{{t services.reassign.step3.targetHost}}</b></td>
+              <td>{{view.targetHost}}</td>
+              <td><span class="to-be-installed-green"><i class="glyphicon glyphicon-plus"></i>&nbsp;{{t admin.highAvailability.wizard.step3.toBeInstalled}}</span></td>
+            </tr>
+          </table>
         </div>
       </div>
+      {{#if wizardController.isComponentWithReconfiguration}}
+        {{#if isLoaded}}
+          {{#if stepConfigs.length}}
+            <div id="serviceConfig">
+              {{t services.reassign.step3.configs}}
+              {{view App.ServiceConfigView}}
+            </div>
+          {{/if}}
+        {{else}}
+          {{view App.SpinnerView}}
+        {{/if}}
+      {{/if}}
     </div>
   </div>
 </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/cba69d93/ambari-web/app/views/main/service/reassign/step3_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/reassign/step3_view.js b/ambari-web/app/views/main/service/reassign/step3_view.js
index 003fcb6..b6a379e 100644
--- a/ambari-web/app/views/main/service/reassign/step3_view.js
+++ b/ambari-web/app/views/main/service/reassign/step3_view.js
@@ -27,8 +27,8 @@ App.ReassignMasterWizardStep3View = Em.View.extend({
 
   targetHost: Em.computed.alias('controller.content.reassignHosts.target'),
 
-  printReview: function () {
-    $("#step8-info").jqprint();
+  didInsertElement: function () {
+    this.get('controller').loadStep();
   },
 
   jdbcSetupMessage: function() {

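Swapping printReview for didInsertElement ties the step's data load to the view lifecycle: loadStep runs once, only after the review table is actually rendered. A bare sketch of the hook in the Ember 1.x style used by ambari-web (the view name is a placeholder):

    App.ExampleStepView = Em.View.extend({
      didInsertElement: function () {
        // fires once, after the template is in the DOM, so the async
        // config fetch cannot race the initial render
        this.get('controller').loadStep();
      }
    });
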
http://git-wip-us.apache.org/repos/asf/ambari/blob/cba69d93/ambari-web/app/views/main/service/reassign/step5_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/reassign/step5_view.js b/ambari-web/app/views/main/service/reassign/step5_view.js
index 3a063b5..9c4f39d 100644
--- a/ambari-web/app/views/main/service/reassign/step5_view.js
+++ b/ambari-web/app/views/main/service/reassign/step5_view.js
@@ -43,8 +43,8 @@ App.ReassignMasterWizardStep5View = Em.View.extend({
     }
 
     if (this.get('controller.content.reassign.component_name') === 'APP_TIMELINE_SERVER') {
-      user = this.get('controller.content.serviceProperties.yarn-env.yarn_user');
-      path = this.get('controller.content.serviceProperties.yarn-site')['yarn.timeline-service.leveldb-timeline-store.path'];
+      user = this.get('controller.content.configs.yarn-env.yarn_user');
+      path = this.get('controller.content.configs.yarn-site')['yarn.timeline-service.leveldb-timeline-store.path'];
     }
 
     return Em.I18n.t('services.reassign.step5.body.' + this.get('controller.content.reassign.component_name').toLowerCase() + ha).

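Note the mixed access style in the APP_TIMELINE_SERVER branch above: Ember's get() treats every '.' as a path separator, so a property key that itself contains dots ('yarn.timeline-service.leveldb-timeline-store.path') cannot be resolved by path. The site object is fetched by path, then the dotted key is bracket-indexed. A tiny illustration with a made-up value:

    var configs = {
      'yarn-site': {
        'yarn.timeline-service.leveldb-timeline-store.path': '/hadoop/yarn/timeline'
      }
    };
    // A path lookup like Em.get(configs, 'yarn-site.yarn.timeline-service...')
    // would split on every dot and return undefined; bracket-indexing treats
    // the dotted string as one key.
    var path = configs['yarn-site']['yarn.timeline-service.leveldb-timeline-store.path'];
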
http://git-wip-us.apache.org/repos/asf/ambari/blob/cba69d93/ambari-web/test/controllers/main/service/reassign/step1_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/service/reassign/step1_controller_test.js b/ambari-web/test/controllers/main/service/reassign/step1_controller_test.js
index a43d91f..7dbf24a 100644
--- a/ambari-web/test/controllers/main/service/reassign/step1_controller_test.js
+++ b/ambari-web/test/controllers/main/service/reassign/step1_controller_test.js
@@ -33,7 +33,7 @@ describe('App.ReassignMasterWizardStep1Controller', function () {
   });
   controller.set('_super', Em.K);
 
-  describe('#loadConfigTags', function() {
+  describe('#loadConfigsTags', function() {
     beforeEach(function() {
       this.stub = sinon.stub(App.router, 'get');
     });
@@ -42,7 +42,7 @@ describe('App.ReassignMasterWizardStep1Controller', function () {
       this.stub.restore();
     });
 
-    it('tests loadConfigTags', function() {
+    it('tests loadConfigsTags', function() {
       controller.loadConfigsTags();
       var args = testHelpers.findAjaxRequest('name', 'config.tags');
       expect(args).exists;
@@ -77,8 +77,11 @@ describe('App.ReassignMasterWizardStep1Controller', function () {
     });
 
     it('tests getDatabaseHost', function() {
-      controller.set('content.serviceProperties', {
-        'javax.jdo.option.ConnectionURL': "jdbc:mysql://c6401/hive?createDatabaseIfNotExist=true"
+      controller.set('content.configs', {
+        'hive-site': {
+          'javax.jdo.option.ConnectionURL': 'jdbc:mysql://c6401/hive?createDatabaseIfNotExist=true'
+
+        }
       });
 
       controller.set('content.reassign.service_id', 'HIVE');
@@ -108,7 +111,8 @@ describe('App.ReassignMasterWizardStep1Controller', function () {
       sinon.stub(controller, 'getDatabaseHost', Em.K);
       sinon.stub(controller, 'saveDatabaseType', Em.K);
       sinon.stub(controller, 'saveServiceProperties', Em.K);
-    
+      sinon.stub(controller, 'saveConfigs', Em.K);
+
       reassignCtrl = App.router.reassignMasterController;
       reassignCtrl.set('content.hasManualSteps', true);
     });
@@ -117,12 +121,14 @@ describe('App.ReassignMasterWizardStep1Controller', function () {
       controller.getDatabaseHost.restore();
       controller.saveDatabaseType.restore();
       controller.saveServiceProperties.restore();
+      controller.saveConfigs.restore();
     });
   
     it('should not set hasManualSteps to false for oozie with derby db', function() {
       var data = {
         items: [
           {
+            type: 'oozie-site',
             properties: {
               'oozie.service.JPAService.jdbc.driver': 'jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true'
             }
@@ -141,6 +147,7 @@ describe('App.ReassignMasterWizardStep1Controller', function () {
       var data = {
         items: [
           {
+            type: 'oozie-site',
             properties: {
               'oozie.service.JPAService.jdbc.driver': 'mysql'
             }


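The test updates keep to the suite's stub-and-restore discipline: every collaborator with side effects is stubbed to Em.K (Ember's no-op) in beforeEach and restored in afterEach so state never leaks between cases. A minimal sketch of the shape, with placeholder names:

    describe('#loadStep', function () {
      beforeEach(function () {
        // neutralize persistence so the test exercises control flow only
        sinon.stub(controller, 'saveConfigs', Em.K);
      });
      afterEach(function () {
        controller.saveConfigs.restore(); // never leak stubs between cases
      });

      it('does not persist when nothing was loaded', function () {
        expect(controller.saveConfigs.called).to.be.false;
      });
    });
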
[50/50] [abbrv] ambari git commit: Merge branch 'trunk' into branch-dev-patch-upgrade

Posted by nc...@apache.org.
Merge branch 'trunk' into branch-dev-patch-upgrade


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/eb2c904e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/eb2c904e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/eb2c904e

Branch: refs/heads/branch-dev-patch-upgrade
Commit: eb2c904e1b526e1d581e8985e8e966e3d0ce1eb8
Parents: cd245c0 d7f1e8c
Author: Nate Cole <nc...@hortonworks.com>
Authored: Wed Jan 18 10:57:13 2017 -0500
Committer: Nate Cole <nc...@hortonworks.com>
Committed: Wed Jan 18 10:57:13 2017 -0500

----------------------------------------------------------------------
 .../controllers/groups/GroupsEditCtrl.js        |    6 +-
 .../ui/admin-web/app/scripts/i18n.config.js     |    2 +-
 .../app/scripts/services/PermissionsSaver.js    |    2 +-
 ambari-agent/pom.xml                            |   29 +
 .../java/org/apache/ambari/tools/zk/ZkAcl.java  |   23 +-
 .../org/apache/ambari/tools/zk/ZkMigrator.java  |    2 +-
 .../apache/ambari/tools/zk/ZkPathPattern.java   |  114 +
 .../src/main/python/ambari_agent/ActionQueue.py |   15 +-
 .../src/main/python/ambari_agent/Controller.py  |   51 +-
 .../src/main/python/ambari_agent/main.py        |   17 +-
 ambari-agent/src/packages/tarball/all.xml       |   49 +-
 .../apache/ambari/tools/zk/ZkMigratorTest.java  |   45 +-
 .../test/python/ambari_agent/TestHeartbeat.py   |    2 +
 .../src/test/python/ambari_agent/TestMain.py    |    4 +-
 .../core/resources/zkmigrator.py                |    2 +-
 .../libraries/functions/constants.py            |    8 +
 .../libraries/functions/download_from_hdfs.py   |   76 +
 .../libraries/functions/get_config.py           |   45 +
 .../libraries/functions/security_commons.py     |   37 +
 .../functions/setup_ranger_plugin_xml.py        |  101 +-
 .../libraries/functions/solr_cloud_util.py      |   18 +-
 .../libraries/providers/hdfs_resource.py        |  177 +-
 .../libraries/resources/hdfs_resource.py        |   11 +-
 .../libraries/script/script.py                  |   26 +
 .../org/apache/ambari/logfeeder/LogFeeder.java  |    2 +
 .../apache/ambari/logfeeder/util/SSLUtil.java   |   68 +
 .../src/main/scripts/run.sh                     |    2 +-
 .../main/webapp/libs/bower/select2/select2.js   |    4 +
 .../other/daterangepicker/js/daterangepicker.js |   12 +-
 .../src/main/webapp/scripts/utils/Utils.js      | 2255 +++++++++---------
 .../troubleshoot/TroubleShootLayoutView.js      |  748 +++---
 .../logsearch/solr/AmbariSolrCloudCLI.java      |   71 +-
 .../logsearch/solr/AmbariSolrCloudClient.java   |   48 +-
 .../solr/AmbariSolrCloudClientBuilder.java      |    6 -
 .../solr/commands/CopyZnodeZkCommand.java       |   79 -
 .../solr/commands/CreateSaslUsersZkCommand.java |    3 +-
 .../solr/commands/SecureSolrZNodeZkCommand.java |   74 +
 .../solr/commands/SecureZNodeZkCommand.java     |   19 +-
 .../ambari/logsearch/solr/util/AclUtils.java    |   62 +-
 .../ambari-metrics/datasource.js                |   36 +-
 ambari-server/conf/unix/ambari.properties       |    2 +
 ambari-server/conf/unix/metrics.properties      |   46 +-
 ambari-server/conf/windows/metrics.properties   |   46 +-
 .../server/actionmanager/ActionScheduler.java   |   18 +-
 .../actionmanager/ExecutionCommandWrapper.java  |    2 +-
 .../apache/ambari/server/agent/ActionQueue.java |   31 +-
 .../resources/UpgradeResourceDefinition.java    |    4 +-
 .../controller/AmbariActionExecutionHelper.java |   33 +-
 .../AmbariCustomCommandExecutionHelper.java     |    8 +-
 .../controller/AmbariManagementController.java  |    7 +
 .../AmbariManagementControllerImpl.java         |   23 +-
 .../server/controller/ControllerModule.java     |    2 +
 .../HostStackVersionResourceProvider.java       |  117 +-
 .../QuickLinkArtifactResourceProvider.java      |   22 +
 .../internal/UpgradeResourceProvider.java       |   76 +-
 .../metrics/RestMetricsPropertyProvider.java    |    2 +-
 .../controller/utilities/KerberosChecker.java   |   51 +-
 .../utilities/LoginContextHelper.java           |   56 +
 .../upgrade/HostVersionOutOfSyncListener.java   |  101 +-
 .../CachedRoleCommandOrderProvider.java         |   23 +-
 .../server/metadata/RoleCommandOrder.java       |  171 +-
 .../server/metrics/system/MetricsService.java   |   15 +-
 .../server/metrics/system/MetricsSource.java    |   12 +-
 .../system/impl/AbstractMetricsSource.java      |   10 +-
 .../system/impl/AmbariMetricSinkImpl.java       |   18 +-
 .../system/impl/AmbariPerformanceMonitor.java   |  141 ++
 .../system/impl/DatabaseMetricsSource.java      |  199 ++
 .../metrics/system/impl/JvmMetricsSource.java   |   38 +-
 .../system/impl/MetricsConfiguration.java       |   38 +
 .../metrics/system/impl/MetricsServiceImpl.java |   59 +-
 .../dispatchers/AmbariSNMPDispatcher.java       |   37 +-
 .../server/orm/entities/ArtifactEntity.java     |    6 +-
 .../server/security/AmbariEntryPoint.java       |   30 +-
 .../serveraction/upgrades/ConfigureAction.java  |  142 +-
 .../upgrades/UpgradeUserKerberosDescriptor.java |  130 +-
 .../server/stack/ModuleFileUnmarshaller.java    |   11 +
 .../ambari/server/stageplanner/RoleGraph.java   |  120 +-
 .../org/apache/ambari/server/state/Cluster.java |    8 +
 .../ambari/server/state/ConfigHelper.java       |    9 +-
 .../ambari/server/state/UpgradeContext.java     |   43 +-
 .../server/state/UpgradeContextFactory.java     |   47 +
 .../ambari/server/state/UpgradeHelper.java      |   44 +-
 .../server/state/ValueAttributesInfo.java       |   23 +
 .../server/state/cluster/ClusterImpl.java       |   25 +-
 .../ambari/server/state/quicklinks/Link.java    |   11 +
 .../DefaultQuickLinkVisibilityController.java   |  213 ++
 .../QuickLinkVisibilityController.java          |   37 +
 .../QuickLinkVisibilityControllerFactory.java   |   57 +
 .../quicklinksprofile/QuickLinksProfile.java    |   10 +-
 .../QuickLinksProfileEvaluationException.java   |   31 +
 .../QuickLinksProfileEvaluator.java             |  202 --
 .../QuickLinksProfileEvaluatorException.java    |   27 -
 .../ShowAllLinksVisibilityController.java       |   38 +
 .../state/stack/StackRoleCommandOrder.java      |   21 +-
 .../ambari/server/state/stack/UpgradePack.java  |   67 +-
 .../upgrade/ConfigUpgradeChangeDefinition.java  |  268 ++-
 .../state/stack/upgrade/ConfigureTask.java      |   50 +-
 .../state/stack/upgrade/HostOrderGrouping.java  |  144 +-
 .../state/stack/upgrade/StageWrapper.java       |   11 +
 .../stack/upgrade/StageWrapperBuilder.java      |    6 +-
 .../server/state/stack/upgrade/TaskWrapper.java |   14 +-
 .../server/upgrade/SchemaUpgradeUtil.java       |   76 +
 .../server/upgrade/UpgradeCatalog250.java       |  330 +++
 .../apache/ambari/server/view/ClusterImpl.java  |    9 +
 .../ambari/server/view/RemoteAmbariCluster.java |   30 +
 .../apache/ambari/server/view/ViewRegistry.java |   29 +-
 .../python/ambari_server/serverConfiguration.py |    5 +-
 .../main/python/ambari_server/serverSetup.py    |    6 +
 .../src/main/python/ambari_server/utils.py      |    6 +
 .../package/scripts/accumulo_script.py          |   14 +-
 .../configuration/infra-solr-client-log4j.xml   |    4 +-
 .../0.1.0/configuration/infra-solr-log4j.xml    |   18 +-
 .../0.1.0/properties/solr-log4j.properties.j2   |    4 +-
 .../0.1.0/configuration/ams-hbase-log4j.xml     |    8 +-
 .../0.1.0/configuration/ams-log4j.xml           |    4 +-
 .../0.1.0/configuration/ams-ssl-client.xml      |    9 +
 .../HDP/grafana-hdfs-topn.json                  |  846 +++++++
 .../AMBARI_METRICS/0.1.0/package/scripts/ams.py |    2 +-
 .../0.1.0/package/scripts/metrics_grafana.py    |    2 +-
 .../0.1.0/package/scripts/params.py             |    3 +
 .../ATLAS/0.1.0.2.3/package/scripts/metadata.py |   13 +
 .../ATLAS/0.1.0.2.3/package/scripts/params.py   |   81 +-
 .../0.1.0.2.3/package/scripts/service_check.py  |   20 +-
 .../package/scripts/setup_ranger_atlas.py       |    4 +-
 .../0.9.2/configuration/druid-historical.xml    |    4 +-
 .../0.9.2/configuration/druid-logrotate.xml     |   24 +-
 .../0.9.2/configuration/druid-middlemanager.xml |    2 +-
 .../DRUID/0.9.2/package/scripts/druid.py        |   36 +-
 .../DRUID/0.9.2/package/scripts/params.py       |    5 +
 .../0.5.0.2.1/configuration/falcon-log4j.xml    |   46 +
 .../FALCON/0.5.0.2.1/package/scripts/falcon.py  |   17 +-
 .../0.5.0.2.1/package/scripts/params_linux.py   |    8 +
 .../0.96.0.2.0/package/scripts/params_linux.py  |  165 +-
 .../package/scripts/setup_ranger_hbase.py       |    4 +-
 .../common-services/HDFS/2.1.0.2.0/metainfo.xml |    3 +
 .../HDFS/2.1.0.2.0/package/scripts/datanode.py  |    5 +-
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |   59 +-
 .../2.1.0.2.0/package/scripts/journalnode.py    |    5 +-
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py  |   32 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |  166 +-
 .../2.1.0.2.0/package/scripts/service_check.py  |   24 +-
 .../package/scripts/setup_ranger_hdfs.py        |   44 +-
 .../configuration/ranger-hdfs-audit.xml         |   94 -
 .../ranger-hdfs-plugin-properties.xml           |   18 +-
 .../3.0.0.3.0/package/scripts/hdfs_namenode.py  |   58 +-
 .../3.0.0.3.0/package/scripts/journalnode.py    |    5 +-
 .../HDFS/3.0.0.3.0/package/scripts/namenode.py  |   36 +-
 .../3.0.0.3.0/package/scripts/service_check.py  |   24 +-
 .../HDFS/3.0.0.3.0/service_advisor.py           |  602 +++++
 .../0.12.0.2.0/configuration/hive-log4j.xml     |   25 +-
 .../0.12.0.2.0/configuration/webhcat-log4j.xml  |   23 +
 .../package/alerts/alert_hive_metastore.py      |    2 +-
 .../HIVE/0.12.0.2.0/package/scripts/hive.py     |   32 +-
 .../package/scripts/hive_interactive.py         |   15 +-
 .../0.12.0.2.0/package/scripts/hive_server.py   |   20 +
 .../package/scripts/hive_server_interactive.py  |    4 +
 .../0.12.0.2.0/package/scripts/params_linux.py  |  175 +-
 .../package/scripts/setup_ranger_hive.py        |    6 +-
 .../HIVE/0.12.0.2.0/package/scripts/webhcat.py  |    2 +-
 .../package/templates/zkmigrator_jaas.conf.j2   |   26 +
 .../KAFKA/0.8.1/package/scripts/params.py       |  127 +-
 .../0.8.1/package/scripts/setup_ranger_kafka.py |   10 +-
 .../0.9.0/configuration/ranger-kafka-audit.xml  |   32 +-
 .../ranger-kafka-plugin-properties.xml          |   14 +-
 .../ranger-kafka-policymgr-ssl.xml              |   12 +-
 .../configuration/ranger-kafka-security.xml     |   18 +-
 .../ranger-knox-plugin-properties.xml           |   12 +-
 .../KNOX/0.5.0.2.2/package/scripts/knox.py      |   35 +-
 .../0.5.0.2.2/package/scripts/params_linux.py   |  182 +-
 .../0.5.0.2.2/package/scripts/params_windows.py |    2 +-
 .../package/scripts/setup_ranger_knox.py        |   14 +-
 .../configuration/logfeeder-ambari-config.xml   |    2 +-
 .../logfeeder-custom-logsearch-conf.xml         |    6 +-
 .../0.5.0/configuration/logfeeder-env.xml       |   38 +-
 .../0.5.0/configuration/logfeeder-log4j.xml     |   28 +-
 .../configuration/logfeeder-output-config.xml   |    4 +-
 .../configuration/logfeeder-properties.xml      |    8 +-
 .../configuration/logfeeder-system_log-env.xml  |    2 +
 .../logsearch-audit_logs-solrconfig.xml         |    2 +-
 .../0.5.0/configuration/logsearch-log4j.xml     |   58 +-
 .../LOGSEARCH/0.5.0/metainfo.xml                |   22 +
 .../LOGSEARCH/0.5.0/package/scripts/params.py   |    1 +
 .../0.5.0/package/scripts/setup_logfeeder.py    |   20 +
 .../0.5.0/package/scripts/setup_logsearch.py    |    6 +-
 .../properties/input.config-ambari.json.j2      |   10 +-
 .../0.5.0/properties/logfeeder-env.sh.j2        |    2 -
 .../0.5.0/properties/logsearch-log4j.xml.j2     |   18 +-
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    |   24 +-
 .../4.0.0.2.0/package/scripts/oozie_server.py   |   11 +-
 .../4.0.0.2.0/package/scripts/params_linux.py   |   19 +-
 .../package/templates/zkmigrator_jaas.conf.j2   |   26 +
 .../4.2.0.2.3/configuration/oozie-site.xml      |    1 +
 .../OOZIE/4.2.0.2.3/kerberos.json               |    3 +-
 .../OOZIE/4.2.0.2.3/metainfo.xml                |    1 +
 .../4.2.0.2.5/configuration/oozie-site.xml      |    2 +-
 .../RANGER/0.4.0/package/scripts/params.py      |    9 +-
 .../0.4.0/package/scripts/setup_ranger_xml.py   |    6 +-
 .../0.5.0/configuration/ranger-ugsync-site.xml  |   10 +-
 .../RANGER/0.6.0/configuration/admin-log4j.xml  |   24 +
 .../0.6.0/configuration/tagsync-log4j.xml       |   23 +
 .../0.6.0/configuration/usersync-log4j.xml      |   23 +
 .../RANGER/0.7.0/configuration/ranger-env.xml   |   28 +
 .../0.5.0.2.3/configuration/kms-log4j.xml       |   46 +
 .../configuration/ranger-kms-security.xml       |    6 +
 .../RANGER_KMS/0.5.0.2.3/package/scripts/kms.py |    2 +-
 .../0.5.0.2.3/package/scripts/params.py         |    7 +-
 .../2.0.0/package/scripts/livy2_server.py       |  149 ++
 .../2.0.0/package/scripts/livy2_service.py      |   48 +
 .../SPARK2/2.0.0/package/scripts/params.py      |   70 +-
 .../2.0.0/package/scripts/service_check.py      |   31 +-
 .../SPARK2/2.0.0/package/scripts/setup_livy2.py |   80 +
 .../2.0.0/package/scripts/status_params.py      |    6 +
 .../SQOOP/1.4.4.2.0/package/scripts/sqoop.py    |   21 +-
 .../0.10.0/configuration/ranger-storm-audit.xml |   32 +-
 .../ranger-storm-policymgr-ssl.xml              |   12 +-
 .../configuration/ranger-storm-security.xml     |   18 +-
 .../configuration/storm-cluster-log4j.xml       |   25 +-
 .../0.10.0/configuration/storm-worker-log4j.xml |   75 +-
 .../STORM/0.9.1/package/scripts/params_linux.py |  171 +-
 .../0.9.1/package/scripts/setup_ranger_storm.py |   39 +-
 .../ranger-storm-plugin-properties.xml          |   71 +
 .../1.0.1/configuration/storm-cluster-log4j.xml |   25 +-
 .../1.0.1/configuration/storm-worker-log4j.xml  |   75 +-
 .../YARN/2.1.0.2.0/kerberos.json                |   13 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |  219 +-
 .../package/scripts/resourcemanager.py          |    4 +-
 .../package/scripts/setup_ranger_yarn.py        |    4 +-
 .../YARN/3.0.0.3.0/configuration/yarn-site.xml  |   12 -
 .../YARN/3.0.0.3.0/kerberos.json                |   17 +-
 .../3.0.0.3.0/package/scripts/params_linux.py   |    4 +-
 .../package/scripts/resourcemanager.py          |    2 -
 .../0.6.0.2.5/configuration/zeppelin-env.xml    |   87 -
 .../configuration/zeppelin-log4j-properties.xml |   37 +
 .../configuration/zeppelin-shiro-ini.xml        |   63 +
 .../ZEPPELIN/0.6.0.2.5/metainfo.xml             |    2 +
 .../0.6.0.2.5/package/scripts/master.py         |    2 +-
 .../0.6.0.2.5/package/scripts/params.py         |    4 +-
 .../3.4.5/package/scripts/zookeeper_server.py   |    9 +-
 .../ZOOKEEPER/3.4.9/service_advisor.py          |  168 ++
 .../src/main/resources/configuration-schema.xsd |    8 +
 .../custom_actions/scripts/ru_execute_tasks.py  |    2 +-
 .../src/main/resources/scripts/stack_advisor.py |   14 +-
 .../HDFS/package/scripts/hdfs_namenode.py       |    3 +-
 .../stacks/BIGTOP/0.8/services/stack_advisor.py |  184 +-
 .../2.0.6/hooks/after-INSTALL/scripts/params.py |    3 +-
 .../HDP/2.0.6/properties/stack_features.json    |   45 +-
 .../stacks/HDP/2.0.6/services/stack_advisor.py  |  819 ++-----
 .../stacks/HDP/2.1/services/OOZIE/metainfo.xml  |    1 +
 .../stacks/HDP/2.1/services/stack_advisor.py    |   94 +-
 .../ranger-hbase-plugin-properties.xml          |   10 +-
 .../ranger-hdfs-plugin-properties.xml           |   12 +-
 .../services/HIVE/configuration/hive-site.xml   |    1 +
 .../ranger-hive-plugin-properties.xml           |   10 +-
 .../ranger-knox-plugin-properties.xml           |    2 +-
 .../stacks/HDP/2.2/services/YARN/kerberos.json  |   17 +-
 .../stacks/HDP/2.2/services/stack_advisor.py    |  176 +-
 .../HDP/2.3.ECS/services/YARN/kerberos.json     |   17 +-
 .../HBASE/configuration/ranger-hbase-audit.xml  |   32 +-
 .../ranger-hbase-policymgr-ssl.xml              |   12 +-
 .../configuration/ranger-hbase-security.xml     |   20 +-
 .../configuration/ranger-hdfs-policymgr-ssl.xml |   12 +-
 .../HDFS/configuration/ranger-hdfs-security.xml |   20 +-
 .../HIVE/configuration/ranger-hive-audit.xml    |   32 +-
 .../configuration/ranger-hive-policymgr-ssl.xml |   12 +-
 .../HIVE/configuration/ranger-hive-security.xml |   20 +-
 .../ranger-kafka-policymgr-ssl.xml              |    4 +-
 .../KNOX/configuration/ranger-knox-audit.xml    |   32 +-
 .../configuration/ranger-knox-policymgr-ssl.xml |   12 +-
 .../KNOX/configuration/ranger-knox-security.xml |   18 +-
 .../ranger-storm-policymgr-ssl.xml              |    4 +-
 .../configuration/ranger-storm-security.xml     |    2 +-
 .../YARN/configuration/ranger-yarn-audit.xml    |   32 +-
 .../ranger-yarn-plugin-properties.xml           |   12 +-
 .../configuration/ranger-yarn-policymgr-ssl.xml |   12 +-
 .../YARN/configuration/ranger-yarn-security.xml |   18 +-
 .../stacks/HDP/2.3/services/YARN/kerberos.json  |   17 +-
 .../stacks/HDP/2.3/services/stack_advisor.py    |   36 +-
 .../stacks/HDP/2.3/upgrades/config-upgrade.xml  |  153 ++
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml |    2 +
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml |   99 +
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml |   96 +-
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml |  104 +-
 .../stacks/HDP/2.3/upgrades/upgrade-2.3.xml     |   13 +
 .../stacks/HDP/2.3/upgrades/upgrade-2.4.xml     |   58 +-
 .../stacks/HDP/2.3/upgrades/upgrade-2.5.xml     |   57 +-
 .../stacks/HDP/2.3/upgrades/upgrade-2.6.xml     |   13 +
 .../stacks/HDP/2.4/upgrades/config-upgrade.xml  |  179 +-
 .../HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml |    2 +
 .../HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml |  105 +-
 .../HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml |   99 +-
 .../stacks/HDP/2.4/upgrades/upgrade-2.4.xml     |    9 +
 .../stacks/HDP/2.4/upgrades/upgrade-2.5.xml     |   49 +-
 .../stacks/HDP/2.4/upgrades/upgrade-2.6.xml     |   50 +-
 .../stacks/HDP/2.5/role_command_order.json      |    6 +
 .../ATLAS/configuration/ranger-atlas-audit.xml  |    6 +-
 .../ranger-atlas-plugin-properties.xml          |   58 +-
 .../ranger-atlas-policymgr-ssl.xml              |   12 +-
 .../configuration/ranger-atlas-security.xml     |   20 +-
 .../ranger-hbase-plugin-properties.xml          |   71 +
 .../ranger-hdfs-plugin-properties.xml           |   50 +-
 .../HIVE/configuration/hive-interactive-env.xml |   22 +-
 .../configuration/hive-interactive-site.xml     |   15 +-
 .../HIVE/configuration/hivemetastore-site.xml   |    7 +-
 .../hiveserver2-interactive-site.xml            |    7 +-
 .../HIVE/configuration/hiveserver2-site.xml     |    7 +-
 .../HIVE/configuration/llap-daemon-log4j.xml    |   25 +-
 .../ranger-hive-plugin-properties.xml           |   71 +
 .../HIVE/configuration/ranger-hive-security.xml |    2 +-
 .../HIVE/configuration/tez-interactive-site.xml |    2 +-
 .../ranger-kafka-plugin-properties.xml          |   71 +
 .../ranger-knox-plugin-properties.xml           |   71 +
 .../ranger-storm-policymgr-ssl.xml              |    4 +-
 .../configuration/ranger-storm-security.xml     |    2 +-
 .../ranger-yarn-plugin-properties.xml           |   71 +
 .../stacks/HDP/2.5/services/YARN/kerberos.json  |   17 +-
 .../stacks/HDP/2.5/services/stack_advisor.py    | 1498 ++++++------
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml  |  227 ++
 .../HDP/2.5/upgrades/host-ordered-upgrade.xml   |  597 -----
 .../HDP/2.5/upgrades/host-upgrade-2.5.xml       |  593 +++++
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml |    2 +
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml |  123 +-
 .../stacks/HDP/2.5/upgrades/upgrade-2.5.xml     |    9 +
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml     |   68 +
 .../stacks/HDP/2.6/services/ATLAS/metainfo.xml  |    4 +-
 .../HIVE/configuration/hive-interactive-env.xml |   22 +
 .../configuration/hive-interactive-site.xml     |   19 +-
 .../services/KNOX/configuration/topology.xml    |  164 ++
 .../stacks/HDP/2.6/services/OOZIE/metainfo.xml  |    3 +
 .../stacks/HDP/2.6/services/SLIDER/metainfo.xml |    2 +-
 .../stacks/HDP/2.6/services/SPARK/metainfo.xml  |    2 +-
 .../SPARK2/configuration/livy2-conf.xml         |   82 +
 .../services/SPARK2/configuration/livy2-env.xml |   95 +
 .../configuration/livy2-log4j-properties.xml    |   42 +
 .../configuration/livy2-spark-blacklist.xml     |   52 +
 .../HDP/2.6/services/SPARK2/kerberos.json       |  126 +
 .../stacks/HDP/2.6/services/SPARK2/metainfo.xml |   95 +-
 .../stacks/HDP/2.6/services/SQOOP/metainfo.xml  |    2 +-
 .../stacks/HDP/2.6/services/STORM/metainfo.xml  |    2 +-
 .../stacks/HDP/2.6/services/TEZ/metainfo.xml    |    2 +-
 .../stacks/HDP/2.6/services/YARN/metainfo.xml   |    2 +-
 .../ZEPPELIN/configuration/zeppelin-env.xml     |  182 ++
 .../HDP/2.6/services/ZEPPELIN/metainfo.xml      |    2 +-
 .../HDP/2.6/services/ZOOKEEPER/metainfo.xml     |    2 +-
 .../stacks/HDP/2.6/upgrades/config-upgrade.xml  |   34 +
 .../HDP/2.6/upgrades/host-ordered-upgrade.xml   |  573 -----
 .../HDP/2.6/upgrades/host-upgrade-2.6.xml       |  562 +++++
 .../HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml |    2 +
 .../stacks/HDP/2.6/upgrades/upgrade-2.6.xml     |    7 +
 .../3.0/hooks/after-INSTALL/scripts/params.py   |    3 +-
 .../HDP/3.0/properties/stack_features.json      |    3 +-
 .../stacks/HDP/3.0/role_command_order.json      |  149 ++
 .../stacks/HDPWIN/2.1/services/stack_advisor.py |  113 +-
 .../stacks/HDPWIN/2.2/services/stack_advisor.py |   99 +-
 .../stacks/PERF/1.0/services/YARN/kerberos.json |   13 +-
 .../main/resources/stacks/service_advisor.py    |   65 +-
 .../src/main/resources/stacks/stack_advisor.py  | 1075 ++++++++-
 .../src/main/resources/upgrade-config.xsd       |   30 +-
 .../src/main/resources/upgrade-pack.xsd         |   14 +-
 .../webapp/WEB-INF/spring-security.xml          |    1 +
 .../actionmanager/TestActionScheduler.java      |    5 +-
 .../ambari/server/agent/AgentResourceTest.java  |    3 +
 .../UpgradeResourceDefinitionTest.java          |    7 +-
 .../AmbariManagementControllerTest.java         |    2 +-
 .../server/controller/KerberosHelperTest.java   |    6 +
 .../ActiveWidgetLayoutResourceProviderTest.java |    3 +
 .../HostStackVersionResourceProviderTest.java   |   96 +
 .../QuickLinkArtifactResourceProviderTest.java  |  198 ++
 .../StackUpgradeConfigurationMergeTest.java     |    6 +-
 .../UpgradeResourceProviderHDP22Test.java       |    2 +
 .../internal/UpgradeResourceProviderTest.java   |  147 +-
 .../UserAuthorizationResourceProviderTest.java  |    3 +
 .../internal/UserResourceProviderTest.java      |    3 +
 .../metrics/JMXPropertyProviderTest.java        |    7 +-
 .../utilities/KerberosCheckerTest.java          |   43 +-
 .../HostVersionOutOfSyncListenerTest.java       |  121 +-
 .../server/metadata/RoleCommandOrderTest.java   |   64 +
 .../ambari/server/metadata/RoleGraphTest.java   |   73 +
 .../system/impl/JvmMetricsSourceTest.java       |   36 -
 .../metric/system/impl/MetricsServiceTest.java  |   20 +-
 .../metric/system/impl/MetricsSourceTest.java   |  171 ++
 .../system/impl/TestAmbariMetricsSinkImpl.java  |    1 +
 .../metric/system/impl/TestMetricsSource.java   |   13 +-
 .../dispatchers/AmbariSNMPDispatcherTest.java   |   13 +
 .../server/security/AmbariEntryPointTest.java   |   88 +
 .../AmbariBasicAuthenticationFilterTest.java    |   37 +-
 .../upgrades/ConfigureActionTest.java           |  147 +-
 .../UpgradeUserKerberosDescriptorTest.java      |  205 ++
 .../ambari/server/state/UpgradeHelperTest.java  |   74 +-
 .../cluster/ClusterEffectiveVersionTest.java    |    4 +
 .../server/state/cluster/ClusterTest.java       |    5 +-
 .../state/quicklinksprofile/EvaluatorTest.java  |  203 --
 .../quicklinksprofile/FilterEvaluatorTest.java  |  218 ++
 ...uickLinkVisibilityControllerFactoryTest.java |   82 +
 .../QuickLinkVisibilityControllerTest.java      |  181 ++
 .../QuickLinksProfileEvaluatorTest.java         |  168 --
 .../QuickLinksProfileParserTest.java            |    8 +-
 .../state/stack/UpgradePackParsingTest.java     |   92 +
 .../server/state/stack/UpgradePackTest.java     |  168 +-
 .../server/upgrade/UpgradeCatalog222Test.java   |    4 +-
 .../server/upgrade/UpgradeCatalog250Test.java   |  936 +++++++-
 .../view/persistence/DataStoreImplTest.java     |  110 +-
 .../src/test/python/TestStackAdvisor.py         |   52 +-
 .../AMBARI_METRICS/test_metrics_collector.py    |    2 +-
 .../AMBARI_METRICS/test_metrics_grafana.py      |    2 +-
 .../python/stacks/2.0.6/HDFS/test_namenode.py   |   58 +-
 .../stacks/2.0.6/HDFS/test_service_check.py     |   16 +-
 .../stacks/2.0.6/HIVE/test_hive_client.py       |    9 +-
 .../stacks/2.0.6/HIVE/test_hive_metastore.py    |   13 +-
 .../stacks/2.0.6/HIVE/test_hive_server.py       |    9 +-
 .../stacks/2.0.6/HIVE/test_webhcat_server.py    |    7 +-
 .../stacks/2.0.6/OOZIE/test_oozie_server.py     |   13 +-
 .../stacks/2.0.6/common/test_stack_advisor.py   |  425 +++-
 .../stacks/2.0.6/configs/altfs_plus_hdfs.json   |    6 +-
 .../python/stacks/2.0.6/configs/default.json    |   12 +-
 .../stacks/2.0.6/configs/default_client.json    |    3 +-
 .../2.0.6/configs/default_hive_nn_ha.json       |    5 +-
 .../2.0.6/configs/default_hive_nn_ha_2.json     |    5 +-
 .../2.0.6/configs/default_hive_non_hdfs.json    |    5 +-
 .../2.0.6/configs/default_no_install.json       |    5 +-
 .../2.0.6/configs/default_with_bucket.json      |    4 +-
 .../2.0.6/configs/ha_bootstrap_active_node.json |    2 +-
 .../configs/ha_bootstrap_standby_node.json      |    2 +-
 ...ha_bootstrap_standby_node_initial_start.json |    2 +-
 ...dby_node_initial_start_dfs_nameservices.json |    2 +-
 .../python/stacks/2.0.6/configs/ha_default.json |    4 +-
 .../python/stacks/2.0.6/configs/ha_secured.json |    2 +-
 .../python/stacks/2.0.6/configs/hbase-2.2.json  |    4 +-
 .../2.0.6/configs/hbase-rs-2.2-phoenix.json     |    4 +-
 .../stacks/2.0.6/configs/hbase-rs-2.2.json      |    4 +-
 .../test/python/stacks/2.0.6/configs/nn_eu.json |    2 +-
 .../python/stacks/2.0.6/configs/nn_ru_lzo.json  |    2 +-
 .../python/stacks/2.0.6/configs/secured.json    |   14 +-
 .../stacks/2.0.6/configs/secured_client.json    |    3 +-
 .../stacks/2.1/FALCON/test_falcon_client.py     |    6 +
 .../stacks/2.1/FALCON/test_falcon_server.py     |   18 +
 .../stacks/2.1/HIVE/test_hive_metastore.py      |    5 +
 .../stacks/2.1/common/test_stack_advisor.py     |    7 +-
 .../stacks/2.1/configs/default-storm-start.json |    2 +-
 .../test/python/stacks/2.1/configs/default.json |   11 +-
 .../stacks/2.1/configs/secured-storm-start.json |    2 +-
 .../test/python/stacks/2.1/configs/secured.json |    5 +-
 .../stacks/2.2/common/test_stack_advisor.py     |  140 +-
 .../2.2/common/test_stack_advisor_perf.py       |   22 +-
 .../test/python/stacks/2.2/configs/default.json |    6 +-
 .../stacks/2.2/configs/falcon-upgrade.json      |    7 +-
 .../python/stacks/2.2/configs/hive-upgrade.json |    3 +-
 .../2.2/configs/pig-service-check-secure.json   |   15 +-
 .../stacks/2.3/ATLAS/test_metadata_server.py    |    8 +
 .../stacks/2.3/common/test_stack_advisor.py     |   38 +-
 .../stacks/2.4/LOGSEARCH/test_logfeeder.py      |   20 +
 .../test/python/stacks/2.4/configs/default.json |    4 +-
 .../stacks/2.5/ATLAS/test_atlas_server.py       |    2 +
 .../stacks/2.5/RANGER/test_ranger_admin.py      |    6 +-
 .../stacks/2.5/RANGER/test_ranger_tagsync.py    |    2 +-
 .../stacks/2.5/RANGER/test_ranger_usersync.py   |    2 +-
 .../stacks/2.5/RANGER_KMS/test_kms_server.py    |    4 +-
 .../stacks/2.5/ZEPPELIN/test_zeppelin_master.py |   26 +-
 .../stacks/2.5/common/test_stack_advisor.py     |   89 +-
 .../test/python/stacks/2.5/configs/default.json |   26 +-
 .../python/stacks/2.5/configs/hsi_default.json  |    3 +-
 .../test/python/stacks/2.5/configs/hsi_ha.json  |    3 +-
 .../test/python/stacks/2.5/configs/secured.json |   18 +-
 .../test/python/stacks/2.6/DRUID/test_druid.py  |  101 +-
 .../stacks/2.6/RANGER/test_ranger_admin.py      |    6 +-
 .../stacks/2.6/RANGER/test_ranger_tagsync.py    |    2 +-
 .../stacks/2.6/SPARK2/test_spark_livy2.py       |  120 +
 .../test/python/stacks/2.6/configs/default.json |   38 +-
 .../resources/example_quicklinks_profile.json   |    9 +
 .../inconsistent_quicklinks_profile_2.json      |   13 +
 .../test_kerberos_descriptor_2_1_3.json         |   13 +-
 .../src/test/resources/metrics.properties       |    5 +-
 .../HDP/2.1.1/services/HBASE/metainfo.xml       |   44 +
 .../HDP/2.1.1/services/STORM/metainfo.xml       |    1 +
 .../HDP/2.1.1/upgrades/upgrade_bucket_test.xml  |    4 +
 .../upgrades/upgrade_component_tasks_test.xml   |  133 ++
 .../HDP/2.1.1/upgrades/upgrade_direction.xml    |    5 +
 .../upgrades/upgrade_nonrolling_new_stack.xml   |    6 +
 .../stacks/HDP/2.1.1/upgrades/upgrade_test.xml  |    8 +
 .../HDP/2.1.1/upgrades/upgrade_test_checks.xml  |    6 +
 .../HDP/2.1.1/upgrades/upgrade_test_partial.xml |    8 +
 .../HDP/2.1.1/upgrades/upgrade_to_new_stack.xml |   10 +-
 .../stacks/HDP/2.2.0/role_command_order.json    |    5 +
 .../HDP/2.2.0/services/STORM/metainfo.xml       |   37 +
 .../stacks/HDP/2.2.0/upgrades/upgrade_test.xml  |    7 +
 .../HDP/2.2.0/upgrades/upgrade_test_15388.xml   |    7 +
 .../HDP/2.2.0/upgrades/upgrade_test_checks.xml  |    6 +
 .../upgrades/upgrade_test_host_ordered.xml      |    2 +
 .../upgrades/upgrade_test_skip_failures.xml     |    2 +
 .../HDP/2.2.0/upgrades/upgrade_test_15388.xml   |    7 +
 .../org/apache/ambari/view/cluster/Cluster.java |    6 +
 .../data/stacks/HDP-2.1/service_components.json |   13 +-
 ambari-web/app/assets/test/tests.js             |    2 +
 ambari-web/app/controllers/experimental.js      |    2 +
 .../rangerAdmin/step3_controller.js             |   43 +-
 .../rangerAdmin/step4_controller.js             |    2 +-
 .../rangerAdmin/wizard_controller.js            |    2 +
 .../main/admin/stack_and_upgrade_controller.js  |    7 +-
 .../alerts/definition_details_controller.js     |   24 +-
 .../alerts/manage_alert_groups_controller.js    |  194 +-
 .../manage_alert_notifications_controller.js    |  106 +-
 ambari-web/app/controllers/main/host/details.js |   29 +-
 .../controllers/main/service/info/configs.js    |   10 +-
 .../controllers/main/service/info/summary.js    |    9 +-
 ambari-web/app/controllers/main/service/item.js |   28 +-
 .../main/service/reassign/step1_controller.js   |   78 +-
 .../main/service/reassign/step3_controller.js   |  663 +++++
 .../main/service/reassign/step4_controller.js   |  629 +----
 .../main/service/reassign_controller.js         |   47 +-
 ambari-web/app/controllers/wizard.js            |    2 +-
 .../app/controllers/wizard/step0_controller.js  |    3 +
 .../app/controllers/wizard/step4_controller.js  |   22 +-
 .../app/controllers/wizard/step7_controller.js  |    8 +-
 .../app/controllers/wizard/step8_controller.js  |   93 +-
 .../configs/services/ambari_infra_properties.js |   28 +
 .../configs/services/logsearch_properties.js    |   84 +
 .../app/data/configs/wizards/secure_mapping.js  |   12 +-
 .../app/mappers/alert_definitions_mapper.js     |   16 +-
 ambari-web/app/mappers/stack_service_mapper.js  |    3 -
 ambari-web/app/messages.js                      |   30 +-
 .../mixins/common/configs/enhanced_configs.js   |    8 +-
 .../configs/component_actions_by_configs.js     |   19 +-
 .../wizard/wizardDeployProgressController.js    |    2 +-
 ambari-web/app/models/alerts/alert_config.js    |    4 +-
 ambari-web/app/models/alerts/alert_group.js     |    2 +-
 .../app/models/alerts/alert_notification.js     |    4 +-
 .../models/configs/objects/service_config.js    |    1 +
 .../configs/objects/service_config_property.js  |    8 +-
 .../app/models/configs/theme/config_action.js   |    2 +
 ambari-web/app/models/configs/theme/tab.js      |    9 +-
 ambari-web/app/routes/main.js                   |    5 +
 .../app/routes/ra_high_availability_routes.js   |    4 +
 ambari-web/app/routes/reassign_master_routes.js |   24 +-
 ambari-web/app/styles/application.less          |   26 +-
 .../app/styles/theme/bootstrap-ambari.css       |   43 +-
 ambari-web/app/styles/wizard.less               |  113 +-
 .../common/configs/widgets/controls.hbs         |    8 +-
 .../modal_popups/dependent_configs_list.hbs     |   17 +-
 .../common/modal_popups/select_groups_popup.hbs |    2 +-
 .../highAvailability/journalNode/step2.hbs      |   65 +-
 .../highAvailability/rangerAdmin/step1.hbs      |    6 +-
 .../highAvailability/rangerAdmin/step3.hbs      |    8 +-
 .../templates/main/service/add_host_popup.hbs   |   17 +-
 .../templates/main/service/reassign/step3.hbs   |   36 +-
 ambari-web/app/templates/wizard/step0.hbs       |    4 +-
 ambari-web/app/templates/wizard/step1.hbs       |  326 ++-
 ambari-web/app/templates/wizard/step2.hbs       |  120 +-
 ambari-web/app/templates/wizard/step3.hbs       |   45 +-
 ambari-web/app/utils/ajax/ajax.js               |   15 +-
 ambari-web/app/utils/configs/theme/theme.js     |    6 +-
 ambari-web/app/utils/ember_computed.js          |   82 +
 ambari-web/app/views/common/controls_view.js    |    1 +
 .../highAvailability/rangerAdmin/step3_view.js  |    6 +-
 .../admin/stack_upgrade/upgrade_history_view.js |    4 +-
 .../main/alerts/definition_details_view.js      |    4 +-
 .../main/host/details/host_component_view.js    |   12 +-
 ambari-web/app/views/main/service/menu.js       |    4 +-
 .../views/main/service/reassign/step3_view.js   |    4 +-
 .../views/main/service/reassign/step5_view.js   |    4 +-
 ambari-web/app/views/wizard/step2_view.js       |    7 +-
 .../test/aliases/computed/existsInByKey.js      |   59 +
 ambari-web/test/aliases/computed/filterBy.js    |    2 +-
 .../test/aliases/computed/ifThenElseByKeys.js   |   65 +
 .../test/aliases/computed/notExistsInByKey.js   |   59 +
 ambari-web/test/app_test.js                     |    8 +-
 .../journalNode/step4_controller_test.js        |    2 +-
 .../rangerAdmin/step3_controller_test.js        |  114 +
 .../resourceManager/step3_controller_test.js    |   22 +-
 .../admin/stack_and_upgrade_controller_test.js  |    4 -
 .../manage_alert_groups_controller_test.js      |   57 +
 ...anage_alert_notifications_controller_test.js |   10 +
 .../test/controllers/main/host/details_test.js  |   50 +-
 .../main/service/info/config_test.js            |   15 +-
 .../test/controllers/main/service/item_test.js  |   36 +
 .../service/reassign/step1_controller_test.js   |   17 +-
 .../service/reassign/step3_controller_test.js   |  634 +++++
 .../service/reassign/step4_controller_test.js   |  646 +----
 .../test/controllers/wizard/step4_test.js       |   90 +-
 .../test/controllers/wizard/step8_test.js       |   28 +-
 ambari-web/test/init_computed_aliases.js        |    3 +
 .../common/configs/enhanced_configs_test.js     |   22 +
 .../test/models/alerts/alert_config_test.js     |   21 +
 .../test/models/alerts/alert_group_test.js      |   25 +
 .../models/alerts/alert_notification_test.js    |   50 +
 .../objects/service_config_property_test.js     |   55 +-
 .../models/configs/theme/sub_section_test.js    |    2 +
 .../test/models/configs/theme/tab_test.js       |    2 +
 ambari-web/test/utils/config_test.js            |   10 +-
 .../test/utils/configs/theme/theme_test.js      |   48 +-
 ambari-web/test/utils/ember_computed_test.js    |  114 +-
 .../main/alerts/definition_details_view_test.js |   32 +-
 .../host/details/host_component_view_test.js    |   40 +-
 ambari-web/test/views/main/service/menu_test.js |   15 +
 .../ambari/fast_hdfs_resource/Resource.java     |   63 +-
 .../ambari/fast_hdfs_resource/Runner.java       |   20 +-
 .../HDF/2.0/properties/stack_features.json      |    3 +-
 .../ui/app/components/capacityInput.js          |   25 +-
 .../resources/ui/app/components/pathInput.js    |    3 +-
 .../src/main/resources/ui/app/serializers.js    |    8 +-
 .../resources/ui/app/styles/application.less    |   10 +-
 .../ui/app/templates/capacityEditForm.hbs       |    8 +-
 .../templates/components/editLabelCapacity.hbs  |    4 +-
 .../templates/components/editQueueCapacity.hbs  |    4 +-
 .../app/templates/components/queueContainer.hbs |    2 +-
 contrib/views/commons/pom.xml                   |   20 +
 .../view/commons/hdfs/FileOperationService.java |   36 +-
 .../ambari/view/commons/hdfs/HdfsService.java   |   21 +-
 .../ambari/view/commons/hdfs/UploadService.java |   43 +-
 .../ambari/view/commons/hdfs/UserService.java   |   20 +-
 .../view/commons/hdfs/ViewPropertyHelper.java   |   55 +
 contrib/views/files/pom.xml                     |   44 +-
 .../view/filebrowser/DownloadService.java       |   77 +-
 .../view/filebrowser/FileBrowserService.java    |   29 +-
 .../view/filebrowser/FilePreviewService.java    |   16 +-
 .../ambari/view/filebrowser/HelpService.java    |   17 +-
 contrib/views/files/src/main/resources/view.xml |    7 +
 contrib/views/hive-next/pom.xml                 |   64 +-
 .../apache/ambari/view/hive2/HelpService.java   |    2 +-
 .../view/hive2/internal/HdfsApiSupplier.java    |   10 +-
 .../view/hive2/resources/files/FileService.java |   21 +-
 .../jobs/atsJobs/ATSParserFactory.java          |   13 +-
 .../view/hive2/utils/SharedObjectsFactory.java  |   14 +-
 .../views/hive-next/src/main/resources/view.xml |    6 +
 contrib/views/hive20/pom.xml                    |   64 +-
 .../apache/ambari/view/hive20/AuthParams.java   |    2 +
 .../ambari/view/hive20/ConnectionSystem.java    |    4 +
 .../apache/ambari/view/hive20/HelpService.java  |    2 +-
 .../view/hive20/actor/DatabaseManager.java      |   44 +-
 .../ambari/view/hive20/actor/JdbcConnector.java |    6 +
 .../view/hive20/actor/MetaDataManager.java      |    9 +-
 .../view/hive20/actor/MetaDataRetriever.java    |   12 +-
 .../ambari/view/hive20/actor/message/Ping.java  |   10 +
 .../view/hive20/internal/HdfsApiSupplier.java   |   10 +-
 .../view/hive20/internal/dto/ColumnInfo.java    |    1 -
 .../view/hive20/internal/dto/ColumnStats.java   |  170 ++
 .../view/hive20/internal/dto/TableMeta.java     |    9 +
 .../view/hive20/internal/dto/TableStats.java    |   88 +
 .../internal/parsers/TableMetaParserImpl.java   |   41 +-
 .../generators/AnalyzeTableQueryGenerator.java  |   40 +
 .../DeleteDatabaseQueryGenerator.java           |   48 +
 .../FetchColumnStatsQueryGenerator.java         |   40 +
 .../generators/RenameTableQueryGenerator.java   |   85 +
 .../view/hive20/resources/browser/DDLProxy.java |  218 +-
 .../hive20/resources/browser/DDLService.java    |  102 +-
 .../hive20/resources/browser/FileService.java   |   12 +-
 .../hive20/resources/files/FileService.java     |   20 +-
 .../view/hive20/resources/jobs/JobService.java  |   28 +-
 .../jobs/ResultsPaginationController.java       |  251 +-
 .../jobs/atsJobs/ATSParserFactory.java          |    9 +-
 .../hive20/resources/jobs/viewJobs/JobImpl.java |    4 +
 .../hive20/resources/system/SystemService.java  |   29 +
 .../system/ranger/RangerException.java          |   56 +
 .../resources/system/ranger/RangerService.java  |  317 +++
 .../view/hive20/utils/AuthorizationChecker.java |   74 +
 .../utils/MetaDataManagerEventSubmitter.java    |   43 +
 .../view/hive20/utils/SharedObjectsFactory.java |   14 +-
 .../resources/ui/app/adapters/application.js    |    1 +
 .../main/resources/ui/app/adapters/database.js  |    4 +
 .../src/main/resources/ui/app/adapters/job.js   |   11 +
 .../src/main/resources/ui/app/adapters/ping.js  |    5 +
 .../resources/ui/app/adapters/saved-query.js    |   26 +
 .../src/main/resources/ui/app/adapters/table.js |    7 +-
 .../ui/app/components/confirm-dialog.js         |   46 +
 .../ui/app/components/database-drop-confirm.js  |   32 -
 .../ui/app/components/database-not-empty.js     |   29 -
 .../ui/app/components/export-result.js          |   54 +
 .../resources/ui/app/components/info-dialog.js  |   38 +
 .../components/multiple-database-search-bar.js  |   30 +-
 .../resources/ui/app/components/query-editor.js |    5 +
 .../ui/app/components/query-result-table.js     |   52 +-
 .../app/components/table-advanced-settings.js   |    1 -
 .../ui/app/configs/table-level-tabs.js          |    6 +
 .../resources/ui/app/configs/top-level-tabs.js  |    2 +-
 .../ui/app/helpers/format-column-size.js        |   39 +
 .../main/resources/ui/app/models/saved-query.js |   29 +
 .../main/resources/ui/app/models/worksheet.js   |   37 +
 .../hive20/src/main/resources/ui/app/router.js  |    5 +-
 .../main/resources/ui/app/routes/databases.js   |   43 +-
 .../ui/app/routes/databases/database/tables.js  |    5 +
 .../app/routes/databases/database/tables/new.js |   25 +-
 .../routes/databases/database/tables/table.js   |   53 +-
 .../databases/database/tables/table/auth.js     |   27 +
 .../databases/database/tables/table/columns.js  |   10 +
 .../resources/ui/app/routes/databases/index.js  |    1 +
 .../src/main/resources/ui/app/routes/queries.js |   75 +
 .../resources/ui/app/routes/queries/index.js    |   29 +
 .../resources/ui/app/routes/queries/query.js    |  387 +++
 .../src/main/resources/ui/app/routes/query.js   |    2 +-
 .../resources/ui/app/routes/savedqueries.js     |   79 +
 .../resources/ui/app/services/create-table.js   |  182 --
 .../src/main/resources/ui/app/services/jobs.js  |   26 +
 .../src/main/resources/ui/app/services/query.js |   20 +
 .../resources/ui/app/services/saved-queries.js  |   52 +
 .../ui/app/services/table-operations.js         |  204 ++
 .../src/main/resources/ui/app/styles/app.scss   |   29 +
 .../ui/app/templates/components/column-item.hbs |    2 +-
 .../app/templates/components/confirm-dialog.hbs |   39 +
 .../components/database-drop-confirm.hbs        |   37 -
 .../templates/components/database-not-empty.hbs |   38 -
 .../app/templates/components/export-result.hbs  |   46 +
 .../ui/app/templates/components/info-dialog.hbs |   38 +
 .../app/templates/components/jobs-browser.hbs   |    2 +-
 .../components/multiple-database-search-bar.hbs |    3 +-
 .../templates/components/query-result-table.hbs |   48 +-
 .../templates/components/table-properties.hbs   |    2 +-
 .../resources/ui/app/templates/databases.hbs    |   41 +-
 .../databases/database/tables/table.hbs         |   20 +-
 .../database/tables/table/auth-error.hbs        |   35 +
 .../database/tables/table/auth-loading.hbs      |   23 +
 .../databases/database/tables/table/auth.hbs    |   53 +
 .../databases/database/tables/table/columns.hbs |   14 +-
 .../databases/database/tables/table/details.hbs |    2 +-
 .../database/tables/table/partitions.hbs        |    2 +-
 .../databases/database/tables/table/storage.hbs |    2 +-
 .../main/resources/ui/app/templates/queries.hbs |   32 +
 .../ui/app/templates/queries/query.hbs          |  128 +
 .../main/resources/ui/app/templates/query.hbs   |   19 +-
 .../resources/ui/app/templates/savedqueries.hbs |   52 +
 .../views/hive20/src/main/resources/view.xml    |   42 +
 .../DeleteDatabaseQueryGeneratorSpecTest.groovy |   65 +
 .../RenameTableQueryGeneratorSpecTest.groovy    |  104 +
 .../HIVE20_ENVIRONMENT.postman_environment.json |   16 +
 .../rest/postman/hive2.postman_collection.json  |  107 -
 .../rest/postman/hive20.postman_collection.json |  566 +++++
 contrib/views/hueambarimigration/pom.xml        |    5 -
 .../HiveSavedQueryMigrationImplementation.java  |   40 +-
 .../PigScriptMigrationImplementation.java       |   15 +-
 .../src/main/resources/view.xml                 |    2 +
 .../views/jobs/src/main/resources/ui/.gitignore |    3 +-
 contrib/views/pig/pom.xml                       |   34 +-
 .../view/pig/resources/files/FileService.java   |   22 +-
 .../ambari/view/pig/utils/UserLocalObjects.java |   16 +-
 contrib/views/pig/src/main/resources/view.xml   |    7 +
 contrib/views/pom.xml                           |    3 +-
 contrib/views/utils/pom.xml                     |   82 +-
 .../view/utils/hdfs/ConfigurationBuilder.java   |  159 +-
 .../apache/ambari/view/utils/hdfs/HdfsApi.java  |   29 +-
 .../apache/ambari/view/utils/hdfs/HdfsUtil.java |   67 +-
 contrib/views/wfmanager/pom.xml                 |   20 +
 .../apache/oozie/ambari/view/AmbariIOUtil.java  |    7 +-
 .../apache/oozie/ambari/view/FileServices.java  |   20 +-
 .../apache/oozie/ambari/view/HDFSFileUtils.java |   21 +-
 .../apache/oozie/ambari/view/OozieDelegate.java |   13 +-
 .../ambari/view/OozieProxyImpersonator.java     |    4 +-
 .../WorkflowsManagerResource.java               |    2 +-
 .../resources/ui/app/adapters/application.js    |   27 +
 .../resources/ui/app/components/asset-list.js   |   15 +-
 .../ui/app/components/bundle-config.js          |   61 +-
 .../resources/ui/app/components/coord-config.js |   83 +-
 .../app/components/design/proj-manager-tabs.js  |   21 +
 .../ui/app/components/designer-workspace.js     |   51 +-
 .../resources/ui/app/components/drafts-wf.js    |   69 +
 .../resources/ui/app/components/file-picker.js  |   32 +
 .../resources/ui/app/components/file-upload.js  |   19 +-
 .../ui/app/components/flow-designer.js          |   54 +-
 .../resources/ui/app/components/hdfs-browser.js |    2 +
 .../resources/ui/app/components/job-config.js   |   72 +-
 .../resources/ui/app/components/job-details.js  |   29 +-
 .../main/resources/ui/app/components/save-wf.js |   41 +-
 .../ui/app/components/search-create-new-bar.js  |    3 +
 .../resources/ui/app/components/sla-info.js     |    1 +
 .../ui/app/components/workflow-action-editor.js |    9 +
 .../ui/app/components/workflow-job-details.js   |    3 +
 .../ui/app/domain/actionjob_hanlder.js          |   35 +-
 .../coordinator/coordinator-xml-importer.js     |   12 +
 .../ui/app/domain/custom-mapping-handler.js     |   28 +
 .../ui/app/domain/cytoscape-flow-renderer.js    |    7 +-
 .../resources/ui/app/domain/mapping-utils.js    |   16 +-
 .../ui/app/domain/workflow-json-importer.js     |    3 +
 .../ui/app/domain/workflow-path-util.js         |    2 +-
 .../ui/app/domain/workflow-xml-generator.js     |    5 +
 .../ui/app/helpers/format-unicode-date.js       |   24 +
 .../main/resources/ui/app/models/wfproject.js   |   25 +
 .../src/main/resources/ui/app/router.js         |    1 +
 .../src/main/resources/ui/app/routes/design.js  |   15 +-
 .../ui/app/routes/design/dashboardtab.js        |    2 +-
 .../ui/app/routes/design/proj-manager-tab.js    |   29 +
 .../ui/app/services/hdfs-file-uploader.js       |   20 +
 .../ui/app/services/property-extractor.js       |    4 +
 .../src/main/resources/ui/app/styles/app.less   |   48 +-
 .../ui/app/templates/components/arg-config.hbs  |   13 +-
 .../app/templates/components/asset-config.hbs   |    3 +-
 .../ui/app/templates/components/asset-list.hbs  |    9 +-
 .../app/templates/components/asset-manager.hbs  |    5 +-
 .../app/templates/components/bundle-config.hbs  |   16 +-
 .../app/templates/components/coord-config.hbs   |   55 +-
 .../components/design/proj-manager-tabs.hbs     |   18 +
 .../templates/components/designer-workspace.hbs |   18 +-
 .../ui/app/templates/components/drafts-wf.hbs   |  121 +
 .../ui/app/templates/components/file-config.hbs |    8 +-
 .../app/templates/components/flow-designer.hbs  |   69 +-
 .../app/templates/components/hdfs-browser.hbs   |   13 +-
 .../ui/app/templates/components/java-action.hbs |    4 +-
 .../ui/app/templates/components/job-config.hbs  |  137 +-
 .../ui/app/templates/components/job-details.hbs |   20 +-
 .../templates/components/name-value-config.hbs  |   16 +-
 .../app/templates/components/prepare-config.hbs |    8 +-
 .../ui/app/templates/components/save-wf.hbs     |    2 +-
 .../components/search-create-new-bar.hbs        |    2 +-
 .../app/templates/components/spark-action.hbs   |    4 +-
 .../ui/app/templates/components/ssh-action.hbs  |    2 +-
 .../components/workflow-action-editor.hbs       |   11 +
 .../templates/components/workflow-actions.hbs   |    4 +-
 .../components/workflow-job-action-info.hbs     |    8 -
 .../components/workflow-job-details.hbs         |   14 +
 .../main/resources/ui/app/templates/design.hbs  |    4 +-
 .../app/templates/design/proj-manager-tab.hbs   |   19 +
 .../main/resources/ui/app/utils/constants.js    |    1 +
 .../addon/components/directory-viewer.js        |   17 +-
 .../components/design/proj-manager-tabs-test.js |   41 +
 .../integration/components/drafts-wf-test.js    |   41 +
 .../ui/tests/unit/adapters/application-test.js  |   29 +
 .../unit/helpers/format-unicode-date-test.js    |   27 +
 .../ui/tests/unit/models/wfproject-test.js      |   29 +
 .../unit/routes/design/proj-manager-tab-test.js |   28 +
 .../unit/services/hdfs-file-uploader-test.js    |   28 +
 .../views/wfmanager/src/main/resources/view.xml |   27 +-
 docs/pom.xml                                    |   12 +
 817 files changed, 31584 insertions(+), 11494 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/eb2c904e/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 71162e4,2ec43cf..9ebb6e8
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@@ -100,9 -98,9 +99,10 @@@ import org.apache.ambari.server.state.S
  import org.apache.ambari.server.state.StackId;
  import org.apache.ambari.server.state.StackInfo;
  import org.apache.ambari.server.state.UpgradeContext;
+ import org.apache.ambari.server.state.UpgradeContextFactory;
  import org.apache.ambari.server.state.UpgradeHelper;
  import org.apache.ambari.server.state.UpgradeHelper.UpgradeGroupHolder;
 +import org.apache.ambari.server.state.repository.VersionDefinitionXml;
  import org.apache.ambari.server.state.stack.ConfigUpgradePack;
  import org.apache.ambari.server.state.stack.PrereqCheckStatus;
  import org.apache.ambari.server.state.stack.UpgradePack;
@@@ -872,20 -858,22 +880,25 @@@ public class UpgradeResourceProvider ex
      // HDP 2.2 to 2.4 should start with HDP 2.2 and merge in HDP 2.3's config-upgrade.xml
      ConfigUpgradePack configUpgradePack = ConfigurationPackBuilder.build(pack, sourceStackId);
  
+     // TODO: for now, all service components are transitioned to upgrading state
+     // TODO: When performing patch upgrade, we should only target supported services/components
+     // from upgrade pack
+     @Experimental(feature=ExperimentalFeature.PATCH_UPGRADES)
      Set<Service> services = new HashSet<>(cluster.getServices().values());
 +
 +    @Experimental(feature=ExperimentalFeature.PATCH_UPGRADES)
      Map<Service, Set<ServiceComponent>> targetComponents = new HashMap<>();
      for (Service service: services) {
 -      Set<ServiceComponent> serviceComponents =
 -        new HashSet<>(service.getServiceComponents().values());
 -      targetComponents.put(service, serviceComponents);
 +      if (upgradeContext.isServiceSupported(service.getName())) {
 +        Set<ServiceComponent> serviceComponents = new HashSet<>(service.getServiceComponents().values());
 +        targetComponents.put(service, serviceComponents);
 +      }
      }
  
-     // TODO: is there any extreme case when we need to set component upgrade state back to NONE
-     // from IN_PROGRESS (e.g. canceled downgrade)
-     s_upgradeHelper.putComponentsToUpgradingState(version, targetComponents);
+     // !!! determine which stack to check for component isAdvertised
+     StackId componentStack = upgradeContext.getDirection() == Direction.UPGRADE ?
+         upgradeContext.getTargetStackId() : upgradeContext.getOriginalStackId();
+     s_upgradeHelper.putComponentsToUpgradingState(version, targetComponents, componentStack);
  
      for (UpgradeGroupHolder group : groups) {
        boolean skippable = group.skippable;
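
A minimal, self-contained Java sketch of the targeting logic in the hunk
above, for readers following the patch-upgrade change. The Direction enum
and the flattened service/component maps below are hypothetical stand-ins
for Ambari's real Service, ServiceComponent and UpgradeContext types; only
the selection rules mirror the diff.

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    class TargetComponentSelector {
      enum Direction { UPGRADE, DOWNGRADE }

      // service name -> component names, filtered to what the upgrade pack supports
      static Map<String, Set<String>> selectTargets(
          Map<String, Set<String>> componentsByService,
          Set<String> supportedServices,
          Direction direction,
          String targetStackId,
          String originalStackId) {

        Map<String, Set<String>> targets = new HashMap<>();
        for (Map.Entry<String, Set<String>> e : componentsByService.entrySet()) {
          // Patch upgrades only transition services named by the upgrade pack.
          if (supportedServices.contains(e.getKey())) {
            targets.put(e.getKey(), new HashSet<>(e.getValue()));
          }
        }

        // On upgrade, component advertising (isAdvertised) is checked against
        // the target stack; on downgrade, against the original stack.
        String componentStack = direction == Direction.UPGRADE
            ? targetStackId : originalStackId;
        System.out.println("Checking isAdvertised against stack " + componentStack);
        return targets;
      }
    }

The net effect mirrors the diff: services the pack does not support never
make it into targetComponents, so they are never moved to the upgrading
state.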

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb2c904e/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb2c904e/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
----------------------------------------------------------------------


[07/50] [abbrv] ambari git commit: AMBARI-19556. Ambari Yarn Queue Manager allows two queues with the same name. (Akhil PB via gauravn7)

Posted by nc...@apache.org.
AMBARI-19556. Ambari Yarn Queue Manager allows two queues with the same name. (Akhil PB via gauravn7)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c326ce4e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c326ce4e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c326ce4e

Branch: refs/heads/branch-dev-patch-upgrade
Commit: c326ce4e63ccc941df925198125ab42cc74f385b
Parents: cba69d9
Author: Gaurav Nagar <gr...@gmail.com>
Authored: Tue Jan 17 15:47:12 2017 +0530
Committer: Gaurav Nagar <gr...@gmail.com>
Committed: Tue Jan 17 15:48:12 2017 +0530

----------------------------------------------------------------------
 .../src/main/resources/ui/app/components/pathInput.js             | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c326ce4e/contrib/views/capacity-scheduler/src/main/resources/ui/app/components/pathInput.js
----------------------------------------------------------------------
diff --git a/contrib/views/capacity-scheduler/src/main/resources/ui/app/components/pathInput.js b/contrib/views/capacity-scheduler/src/main/resources/ui/app/components/pathInput.js
index e71d226..d3471bf 100644
--- a/contrib/views/capacity-scheduler/src/main/resources/ui/app/components/pathInput.js
+++ b/contrib/views/capacity-scheduler/src/main/resources/ui/app/components/pathInput.js
@@ -27,7 +27,8 @@ App.PathInputComponent = Em.Component.extend({
           basedir = path.substr(0,path.lastIndexOf('.')) || currentBasedir,
           queuePath = [basedir,path.substr(path.lastIndexOf('.')+1)].join('.'),
           queueName = path.substr(path.lastIndexOf('.')+1),
-          alreadyExists = this.get('queues.firstObject.store').hasRecordForId('queue',queuePath.toLowerCase());
+          deletedQueues = this.get('queues.firstObject.store').get('deletedQueues'),
+          alreadyExists = (this.get('queues').findBy('name',queueName)||deletedQueues.findBy('name',queueName))?true:false;
 
       if (!path || !queueName) {
         return this.setProperties({'isError':true,'errorMessage':'Enter queue name.'});
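
A hedged JavaScript sketch of the duplicate-name rule the diff above adds:
a queue name is taken if it matches either an active queue or one in the
store's deletedQueues list. Plain arrays stand in for the Ember store, and
Array.prototype.some replaces Ember's findBy; only the rule itself mirrors
the component code.

    // Returns true when the leaf name of the proposed queue path collides
    // with an active queue or a deleted-but-not-yet-saved queue.
    function queueNameTaken(queues, deletedQueues, path) {
      var queueName = path.substr(path.lastIndexOf('.') + 1);
      var inActive = queues.some(function (q) { return q.name === queueName; });
      var inDeleted = deletedQueues.some(function (q) { return q.name === queueName; });
      return inActive || inDeleted;
    }

    // Example: re-adding a queue that is still in deletedQueues is rejected.
    queueNameTaken([{name: 'default'}], [{name: 'adhoc'}], 'root.adhoc'); // true
    queueNameTaken([{name: 'default'}], [{name: 'adhoc'}], 'root.etl');   // false

Compared with the earlier hasRecordForId lookup, the new check matches by
queue name rather than full record id, and also treats names of queues
pending deletion as taken, which is what blocks two queues from sharing a
name.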