Posted to commits@ambari.apache.org by rl...@apache.org on 2017/05/03 20:11:16 UTC

[1/8] ambari git commit: AMBARI-20760. After PAM setup, Hive View user home test fails (Anita Jebaraj via rlevas)

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-20859 8549682b5 -> b3f7d9e42


AMBARI-20760. After PAM setup, Hive View user home test fails (Anita Jebaraj via rlevas)
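
In outline, the fix stops wrapping the authenticated user in an anonymous java.security.Principal and no longer pushes the token into SecurityContextHolder from inside the provider; it now returns an AmbariUserAuthentication built from the Ambari User record and the user's authorities, which presumably lets downstream consumers (such as the Hive View user-home check) resolve the full user rather than just a name. A minimal sketch of the new success path, using only names that appear in the diff below:

    // Sketch of the new return path in authenticateViaPam() -- see the diff below.
    Collection<AmbariGrantedAuthority> userAuthorities =
        users.getUserAuthorities(userName, UserType.PAM);
    final User user = users.getUser(userName, UserType.PAM);

    Authentication authToken = new AmbariUserAuthentication(passwd, user, userAuthorities);
    authToken.setAuthenticated(true);
    return authToken;   // the caller, not the provider, decides what goes into the SecurityContext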


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f167236c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f167236c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f167236c

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: f167236c24501a0d14afccfbc53a2c648cb21731
Parents: 8549682
Author: Anita Jebaraj <aj...@us.ibm.com>
Authored: Mon May 1 16:33:59 2017 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Mon May 1 16:33:59 2017 -0400

----------------------------------------------------------------------
 .../AmbariPamAuthenticationProvider.java        | 18 ++------
 .../AmbariPamAuthenticationProviderTest.java    | 45 +++++++++++++++++---
 2 files changed, 43 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f167236c/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProvider.java
index ca7cd31..b3fb861 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProvider.java
@@ -17,7 +17,6 @@
  */
 package org.apache.ambari.server.security.authorization;
 
-import java.security.Principal;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Set;
@@ -40,7 +39,6 @@ import org.springframework.security.authentication.AuthenticationServiceExceptio
 import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
 import org.springframework.security.core.Authentication;
 import org.springframework.security.core.AuthenticationException;
-import org.springframework.security.core.context.SecurityContextHolder;
 
 import com.google.inject.Inject;
 
@@ -124,18 +122,10 @@ public class AmbariPamAuthenticationProvider implements AuthenticationProvider {
               users.getUserAuthorities(userName, UserType.PAM);
 
           final User user = users.getUser(userName, UserType.PAM);
-
-          Principal principal = new Principal() {
-            @Override
-            public String getName() {
-              return user.getUserName();
-            }
-          };
-
-          UsernamePasswordAuthenticationToken token = new UsernamePasswordAuthenticationToken(principal, null, userAuthorities);
-          SecurityContextHolder.getContext().setAuthentication(token);
-          return token;
-
+ 
+          Authentication authToken = new AmbariUserAuthentication(passwd, user, userAuthorities);
+          authToken.setAuthenticated(true);
+          return authToken;   
         } catch (PAMException ex) {
           LOG.error("Unable to sign in. Invalid username/password combination - " + ex.getMessage());
           Throwable t = ex.getCause();

http://git-wip-us.apache.org/repos/asf/ambari/blob/f167236c/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProviderTest.java
index 5b3acd0..b7272c5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AmbariPamAuthenticationProviderTest.java
@@ -17,27 +17,31 @@
  */
 package org.apache.ambari.server.security.authorization;
 
-import static junit.framework.Assert.assertEquals;
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.expect;
 
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.HashSet;
 
 import org.apache.ambari.server.H2DatabaseCleaner;
 import org.apache.ambari.server.audit.AuditLoggerModule;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.orm.entities.PrincipalEntity;
+import org.apache.ambari.server.orm.entities.UserEntity;
 import org.apache.ambari.server.security.ClientSecurityType;
+
 import org.easymock.EasyMock;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.jvnet.libpam.PAM;
 import org.jvnet.libpam.UnixUser;
-import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
 import org.springframework.security.core.Authentication;
 import org.springframework.security.core.AuthenticationException;
+import org.springframework.security.crypto.password.PasswordEncoder;
 
 import com.google.inject.Guice;
 import com.google.inject.Inject;
@@ -50,10 +54,16 @@ public class AmbariPamAuthenticationProviderTest {
   private static Injector injector;
 
   @Inject
+  PasswordEncoder passwordEncoder;
+  @Inject
   private AmbariPamAuthenticationProvider authenticationProvider;
   @Inject
   Configuration configuration;
 
+  private static final String TEST_USER_NAME = "userName";
+  private static final String TEST_USER_PASS = "userPass";
+  private static final String TEST_USER_INCORRECT_PASS = "userIncorrectPass";
+
   @Before
   public void setUp() {
     injector = Guice.createInjector(new AuditLoggerModule(), new AuthorizationTestModule());
@@ -70,7 +80,10 @@ public class AmbariPamAuthenticationProviderTest {
 
   @Test(expected = AuthenticationException.class)
   public void testBadCredential() throws Exception {
-    Authentication authentication = new UsernamePasswordAuthenticationToken("notFound", "wrong");
+    UserEntity userEntity = combineUserEntity();
+    User user = new User(userEntity);
+    Collection<AmbariGrantedAuthority> userAuthorities = Collections.singletonList(createNiceMock(AmbariGrantedAuthority.class));
+    Authentication authentication = new AmbariUserAuthentication("wrong", user, userAuthorities);
     authenticationProvider.authenticate(authentication);
   }
 
@@ -78,20 +91,40 @@ public class AmbariPamAuthenticationProviderTest {
   public void testAuthenticate() throws Exception {
     PAM pam = createNiceMock(PAM.class);
     UnixUser unixUser = createNiceMock(UnixUser.class);
+    UserEntity userEntity = combineUserEntity();
+    User user = new User(userEntity);
+    Collection<AmbariGrantedAuthority> userAuthorities = Collections.singletonList(createNiceMock(AmbariGrantedAuthority.class));
     expect(pam.authenticate(EasyMock.anyObject(String.class), EasyMock.anyObject(String.class))).andReturn(unixUser).atLeastOnce();
     expect(unixUser.getGroups()).andReturn(new HashSet<>(Arrays.asList("group"))).atLeastOnce();
     EasyMock.replay(unixUser);
     EasyMock.replay(pam);
-    Authentication authentication = new UsernamePasswordAuthenticationToken("allowedUser", "password");
+    Authentication authentication = new AmbariUserAuthentication("userPass", user, userAuthorities);
     Authentication result = authenticationProvider.authenticateViaPam(pam,authentication);
-    assertEquals("allowedUser", result.getName());
+    Assert.assertNotNull(result);
+    Assert.assertEquals(true, result.isAuthenticated());
+    Assert.assertTrue(result instanceof AmbariUserAuthentication);
   }
 
   @Test
   public void testDisabled() throws Exception {
+    UserEntity userEntity = combineUserEntity();
+    User user = new User(userEntity);
+    Collection<AmbariGrantedAuthority> userAuthorities = Collections.singletonList(createNiceMock(AmbariGrantedAuthority.class));
     configuration.setClientSecurityType(ClientSecurityType.LOCAL);
-    Authentication authentication = new UsernamePasswordAuthenticationToken("allowedUser", "password");
+    Authentication authentication = new AmbariUserAuthentication("userPass", user, userAuthorities);
     Authentication auth = authenticationProvider.authenticate(authentication);
     Assert.assertTrue(auth == null);
   }
+
+  private UserEntity combineUserEntity() {
+    PrincipalEntity principalEntity = new PrincipalEntity();
+    UserEntity userEntity = new UserEntity();
+    userEntity.setUserId(1);
+    userEntity.setUserName(UserName.fromString(TEST_USER_NAME));
+    userEntity.setUserPassword(passwordEncoder.encode(TEST_USER_PASS));
+    userEntity.setUserType(UserType.PAM);
+    userEntity.setPrincipal(principalEntity);
+    return userEntity;
+  }
+
 }


[7/8] ambari git commit: AMBARI-20903 RepoVersion parsing error should be more specific (dili)

Posted by rl...@apache.org.
AMBARI-20903 RepoVersion parsing error should be more specific (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6e2d3219
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6e2d3219
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6e2d3219

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 6e2d32196b065530263b1468d50d710db8b8eca5
Parents: 2b146d9
Author: Di Li <di...@apache.org>
Authored: Tue May 2 14:25:18 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Tue May 2 14:25:18 2017 -0400

----------------------------------------------------------------------
 .../ambari/server/orm/entities/RepositoryVersionEntity.java      | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6e2d3219/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepositoryVersionEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepositoryVersionEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepositoryVersionEntity.java
index d3705f3..f5d669e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepositoryVersionEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepositoryVersionEntity.java
@@ -249,8 +249,8 @@ public class RepositoryVersionEntity {
       try {
         return repositoryVersionHelperProvider.get().parseOperatingSystems(operatingSystems);
       } catch (Exception ex) {
-        // Should never happen as we validate json before storing it to DB
-        LOG.error("Could not parse operating systems json stored in database:" + operatingSystems, ex);
+        String msg = String.format("Failed to parse repository from OS/Repo information in the database: %s. Required fields: repo_name, repo_id, base_url", operatingSystems);
+        LOG.error(msg, ex);
       }
     }
     return Collections.emptyList();


[5/8] ambari git commit: AMBARI-20874. Mask passwords in Request resource responses (rlevas)

Posted by rl...@apache.org.
AMBARI-20874. Mask passwords in Request resource responses (rlevas)
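
RequestResourceProvider now runs the stored request inputs through SecretReference.maskPasswordInPropertyMap before exposing them via REQUEST_INPUTS_ID. Judging from the test added below, any key whose name contains "password" or "passwd" has its value replaced with the literal "SECRET", while other entries pass through unchanged; a small illustration of the expected behavior (expected values taken from the test's assertions, not re-verified against SecretReference itself):

    // Illustration only; the actual masking rules live in SecretReference.maskPasswordInPropertyMap.
    String storedInputs = "{ \"hosts\": \"host1\", \"password\": \"for your eyes only\", \"foo_passwd\": \"for your eyes only\" }";
    String masked = SecretReference.maskPasswordInPropertyMap(storedInputs);
    // Per the test below, "masked" should keep hosts=host1 but report
    // password=SECRET and foo_passwd=SECRET.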


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/34761fcd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/34761fcd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/34761fcd

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 34761fcdcec142d1d6e1dcf76febb9ce526ae927
Parents: 13a981c
Author: Robert Levas <rl...@hortonworks.com>
Authored: Tue May 2 10:52:18 2017 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Tue May 2 10:52:18 2017 -0400

----------------------------------------------------------------------
 .../internal/RequestResourceProvider.java       | 12 ++++++-
 .../internal/RequestResourceProviderTest.java   | 35 ++++++++++++++++++++
 2 files changed, 46 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/34761fcd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
index 57e7024..c405995 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RequestResourceProvider.java
@@ -69,6 +69,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.topology.LogicalRequest;
 import org.apache.ambari.server.topology.TopologyManager;
+import org.apache.ambari.server.utils.SecretReference;
 import org.apache.commons.lang.StringUtils;
 
 import com.google.common.collect.Sets;
@@ -741,7 +742,16 @@ public class RequestResourceProvider extends AbstractControllerResourceProvider
     setResourceProperty(resource, REQUEST_ID_PROPERTY_ID, entity.getRequestId(), requestedPropertyIds);
     setResourceProperty(resource, REQUEST_CONTEXT_ID, entity.getRequestContext(), requestedPropertyIds);
     setResourceProperty(resource, REQUEST_TYPE_ID, entity.getRequestType(), requestedPropertyIds);
-    setResourceProperty(resource, REQUEST_INPUTS_ID, entity.getInputs(), requestedPropertyIds);
+
+    // Mask any sensitive data fields in the inputs data structure
+    if (isPropertyRequested(REQUEST_INPUTS_ID, requestedPropertyIds)) {
+      String value = entity.getInputs();
+      if (!StringUtils.isBlank(value)) {
+        value = SecretReference.maskPasswordInPropertyMap(value);
+      }
+      resource.setProperty(REQUEST_INPUTS_ID, value);
+    }
+
     setResourceProperty(resource, REQUEST_RESOURCE_FILTER_ID,
         org.apache.ambari.server.actionmanager.Request.filtersFromEntity(entity),
         requestedPropertyIds);

http://git-wip-us.apache.org/repos/asf/ambari/blob/34761fcd/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
index feedc74..6bc856d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
@@ -83,6 +83,7 @@ import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.LogicalRequest;
 import org.apache.ambari.server.topology.TopologyManager;
 import org.apache.ambari.server.topology.TopologyRequest;
+import org.apache.ambari.server.utils.SecretReference;
 import org.easymock.Capture;
 import org.easymock.EasyMock;
 import org.junit.After;
@@ -98,6 +99,8 @@ import org.springframework.security.core.context.SecurityContextHolder;
 
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
+import com.google.gson.Gson;
+import com.google.gson.reflect.TypeToken;
 
 /**
  * RequestResourceProvider tests.
@@ -142,6 +145,10 @@ public class RequestResourceProviderTest {
     field.setAccessible(true);
     field.set(null, topologyManager);
 
+    field = SecretReference.class.getDeclaredField("gson");
+    field.setAccessible(true);
+    field.set(null, new Gson());
+
     AuthorizationHelperInitializer.viewInstanceDAOReturningNull();
   }
 
@@ -258,12 +265,38 @@ public class RequestResourceProviderTest {
   public void testGetResources() throws Exception {
     Resource.Type type = Resource.Type.Request;
 
+    String storedInputs = "{" +
+        " \"hosts\": \"host1\"," +
+        " \"check_execute_list\": \"last_agent_env_check,installed_packages,existing_repos,transparentHugePage\"," +
+        " \"jdk_location\": \"http://ambari_server.home:8080/resources/\"," +
+        " \"threshold\": \"20\"," +
+        " \"password\": \"for your eyes only\"," +
+        " \"foo_password\": \"for your eyes only\"," +
+        " \"passwd\": \"for your eyes only\"," +
+        " \"foo_passwd\": \"for your eyes only\"" +
+        " }";
+    String cleanedInputs = SecretReference.maskPasswordInPropertyMap(storedInputs);
+
+    // Make sure SecretReference.maskPasswordInPropertyMap properly masked the password fields in cleanedInputs...
+    Gson gson = new Gson();
+    Map<String, String> map = gson.fromJson(cleanedInputs, new TypeToken<Map<String, String>>() {}.getType());
+    for (Map.Entry<String, String> entry : map.entrySet()) {
+      String name = entry.getKey();
+      if (name.contains("password") || name.contains("passwd")) {
+        Assert.assertEquals("SECRET", entry.getValue());
+      }
+      else {
+        Assert.assertFalse("SECRET".equals(entry.getValue()));
+      }
+    }
+
     AmbariManagementController managementController = createMock(AmbariManagementController.class);
     ActionManager actionManager = createNiceMock(ActionManager.class);
     RequestEntity requestMock = createNiceMock(RequestEntity.class);
 
     expect(requestMock.getRequestContext()).andReturn("this is a context").anyTimes();
     expect(requestMock.getRequestId()).andReturn(100L).anyTimes();
+    expect(requestMock.getInputs()).andReturn(storedInputs).anyTimes();
 
     Capture<Collection<Long>> requestIdsCapture = newCapture();
 
@@ -287,6 +320,7 @@ public class RequestResourceProviderTest {
 
     propertyIds.add(RequestResourceProvider.REQUEST_ID_PROPERTY_ID);
     propertyIds.add(RequestResourceProvider.REQUEST_STATUS_PROPERTY_ID);
+    propertyIds.add(RequestResourceProvider.REQUEST_INPUTS_ID);
 
     Predicate predicate = new PredicateBuilder().property(RequestResourceProvider.REQUEST_ID_PROPERTY_ID).equals("100").
       toPredicate();
@@ -297,6 +331,7 @@ public class RequestResourceProviderTest {
     for (Resource resource : resources) {
       Assert.assertEquals(100L, (long) (Long) resource.getPropertyValue(RequestResourceProvider.REQUEST_ID_PROPERTY_ID));
       Assert.assertEquals("IN_PROGRESS", resource.getPropertyValue(RequestResourceProvider.REQUEST_STATUS_PROPERTY_ID));
+      Assert.assertEquals(cleanedInputs, resource.getPropertyValue(RequestResourceProvider.REQUEST_INPUTS_ID));
     }
 
     // verify


[6/8] ambari git commit: AMBARI-20855 Removing unused script: relocate_host_components.py (Attila Magyar via adoroszlai)

Posted by rl...@apache.org.
AMBARI-20855 Removing unused script: relocate_host_components.py (Attila Magyar via adoroszlai)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2b146d9c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2b146d9c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2b146d9c

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 2b146d9c421b12d2b11def840aca69d1dead4428
Parents: 34761fc
Author: Attila Magyar <am...@hortonworks.com>
Authored: Tue May 2 17:34:26 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Tue May 2 17:34:26 2017 +0200

----------------------------------------------------------------------
 .../scripts/relocate_host_components.py         | 489 -------------------
 1 file changed, 489 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2b146d9c/ambari-server/src/main/resources/scripts/relocate_host_components.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/scripts/relocate_host_components.py b/ambari-server/src/main/resources/scripts/relocate_host_components.py
deleted file mode 100644
index 1b9ad1e..0000000
--- a/ambari-server/src/main/resources/scripts/relocate_host_components.py
+++ /dev/null
@@ -1,489 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import sys
-import os
-import logging
-import tempfile
-import urllib2
-import socket
-import json
-import base64
-import time
-
-AMBARI_HOSTNAME = None
-AMBARI_PORT = 8080
-CLUSTER_NAME = None
-PROTOCOL = "http"
-USERNAME = "admin"
-PASSWORD = "admin"
-DEFAULT_TIMEOUT = 10 # seconds
-START_ON_RELOCATE = False
-
-# Supported Actions
-RELOCATE_ACTION = 'relocate'
-ALLOWED_ACTUAL_STATES_FOR_RELOCATE = [ 'INIT', 'UNKNOWN', 'DISABLED', 'UNINSTALLED' ]
-ALLOWED_HOST_STATUS_FOR_RELOCATE = [ 'HEALTHY' ]
-STATUS_WAIT_TIMEOUT = 120 # seconds
-STATUS_CHECK_INTERVAL = 10 # seconds
-
-# API calls
-GET_CLUSTERS_URI = "/api/v1/clusters/"
-GET_HOST_COMPONENTS_URI = "/api/v1/clusters/{0}/services/{1}/components/{2}" +\
-                          "?fields=host_components"
-GET_HOST_COMPONENT_DESIRED_STATE_URI = "/api/v1/clusters/{0}/hosts/{1}" +\
-                                       "/host_components/{2}" +\
-                                       "?fields=HostRoles/desired_state"
-GET_HOST_COMPONENT_STATE_URI = "/api/v1/clusters/{0}/hosts/{1}" +\
-                               "/host_components/{2}" +\
-                               "?fields=HostRoles/state"
-GET_HOST_STATE_URL = "/api/v1/clusters/{0}/hosts/{1}?fields=Hosts/host_state"
-HOST_COMPONENT_URI = "/api/v1/clusters/{0}/hosts/{1}/host_components/{2}"
-ADD_HOST_COMPONENT_URI = "/api/v1/clusters/{0}/hosts?Hosts/host_name={1}"
-
-logger = logging.getLogger()
-
-
-
-class PreemptiveBasicAuthHandler(urllib2.BaseHandler):
-
-  def __init__(self):
-    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
-    password_mgr.add_password(None, getUrl(''), USERNAME, PASSWORD)
-    self.passwd = password_mgr
-    self.add_password = self.passwd.add_password
-
-  def http_request(self, req):
-    uri = req.get_full_url()
-    user = USERNAME
-    pw = PASSWORD
-    raw = "%s:%s" % (user, pw)
-    auth = 'Basic %s' % base64.b64encode(raw).strip()
-    req.add_unredirected_header('Authorization', auth)
-    return req
-
-
-class AmbariResource:
-
-  def __init__(self, serviceName, componentName):
-    self.serviveName = serviceName
-    self.componentName = componentName
-    self.isInitialized = False
-
-  def initializeResource(self):
-    global CLUSTER_NAME
-    if CLUSTER_NAME is None:
-      CLUSTER_NAME = self.findClusterName()
-
-    if self.serviveName is None:
-      raise Exception('Service name undefined')
-
-    if self.componentName is None:
-      raise Exception('Component name undefined')
-
-    handler = PreemptiveBasicAuthHandler()
-    opener = urllib2.build_opener(handler)
-    # Install opener for all requests
-    urllib2.install_opener(opener)
-    self.urlOpener = opener
-
-    self.old_hostname = self.getHostname()
-
-    self.isInitialized = True
-
-
-  def relocate(self, new_hostname):
-    if not self.isInitialized:
-      raise Exception('Resource not initialized')
-
-    # If old and new hostname are the same exit harmlessly
-    if self.old_hostname == new_hostname:
-      logger.error('New hostname is same as existing host name, %s' % self.old_hostname)
-      sys.exit(2)
-    pass
-
-    try:
-      self.verifyHostComponentStatus(self.old_hostname, new_hostname, self.componentName)
-    except Exception, e:
-      logger.error("Exception caught on verify relocate request.")
-      logger.error(e.message)
-      sys.exit(3)
-
-    # Put host component in Maintenance state
-    self.updateHostComponentStatus(self.old_hostname, self.componentName,
-                                   "Disable", "DISABLED")
-
-    # Delete current host component
-    self.deleteHostComponent(self.old_hostname, self.componentName)
-
-    # Add component on the new host
-    self.addHostComponent(new_hostname, self.componentName)
-
-    # Install host component
-    self.updateHostComponentStatus(new_hostname, self.componentName,
-                                   "Installing", "INSTALLED")
-
-    # Wait on install
-    self.waitOnHostComponentUpdate(new_hostname, self.componentName,
-                                   "INSTALLED")
-
-    if START_ON_RELOCATE:
-      # Start host component
-      self.updateHostComponentStatus(new_hostname, self.componentName,
-                                     "Starting", "STARTED")
-
-      # Wait on start
-      self.waitOnHostComponentUpdate(new_hostname, self.componentName, "STARTED")
-    pass
-  pass
-
-  def waitOnHostComponentUpdate(self, hostname, componentName, status):
-    logger.info("Waiting for host component status to update ...")
-    sleep_itr = 0
-    state = None
-    while sleep_itr < STATUS_WAIT_TIMEOUT:
-      try:
-        state = self.getHostComponentState(hostname, componentName)
-        if status == state:
-          logger.info("Status update successful. status: %s" % state)
-          return
-        pass
-      except Exception, e:
-        logger.error("Caught an exception waiting for status update.. "
-                     "continuing to wait...")
-      pass
-
-      time.sleep(STATUS_CHECK_INTERVAL)
-      sleep_itr += STATUS_CHECK_INTERVAL
-    pass
-    if state and state != status:
-      logger.error("Timed out on wait, status unchanged. status = %s" % state)
-      sys.exit(1)
-    pass
-  pass
-
-  def addHostComponent(self, hostname, componentName):
-    data = '{"host_components":[{"HostRoles":{"component_name":"%s"}}]}' % self.componentName
-    req = urllib2.Request(getUrl(ADD_HOST_COMPONENT_URI.format(CLUSTER_NAME,
-                          hostname)), data)
-
-    req.add_header("X-Requested-By", "ambari_probe")
-    req.get_method = lambda: 'POST'
-    try:
-      logger.info("Adding host component: %s" % req.get_full_url())
-      resp = self.urlOpener.open(req)
-      self.logResponse('Add host component response: ', resp)
-    except Exception, e:
-      logger.error('Create host component failed, component: {0}, host: {1}'
-                    .format(componentName, hostname))
-      logger.error(e)
-      raise e
-    pass
-
-  def deleteHostComponent(self, hostname, componentName):
-    req = urllib2.Request(getUrl(HOST_COMPONENT_URI.format(CLUSTER_NAME,
-                                hostname, componentName)))
-    req.add_header("X-Requested-By", "ambari_probe")
-    req.get_method = lambda: 'DELETE'
-    try:
-      logger.info("Deleting host component: %s" % req.get_full_url())
-      resp = self.urlOpener.open(req)
-      self.logResponse('Delete component response: ', resp)
-    except Exception, e:
-      logger.error('Delete {0} failed.'.format(componentName))
-      logger.error(e)
-      raise e
-    pass
-
-  def updateHostComponentStatus(self, hostname, componentName, contextStr, status):
-    # Update host component
-    data = '{"RequestInfo":{"context":"%s %s"},"Body":{"HostRoles":{"state":"%s"}}}' % (contextStr, self.componentName, status)
-    req = urllib2.Request(getUrl(HOST_COMPONENT_URI.format(CLUSTER_NAME,
-                                hostname, componentName)), data)
-    req.add_header("X-Requested-By", "ambari_probe")
-    req.get_method = lambda: 'PUT'
-    try:
-      logger.info("%s host component: %s" % (contextStr, req.get_full_url()))
-      resp = self.urlOpener.open(req)
-      self.logResponse('Update host component response: ', resp)
-    except Exception, e:
-      logger.error('Update Status {0} failed.'.format(componentName))
-      logger.error(e)
-      raise e
-    pass
-
-  def verifyHostComponentStatus(self, old_hostname, new_hostname, componentName):
-    # Check desired state of host component is not STOPPED or host is
-    # unreachable
-    actualState = self.getHostComponentState(old_hostname, componentName)
-
-    if actualState not in ALLOWED_ACTUAL_STATES_FOR_RELOCATE:
-      raise Exception('Aborting relocate action since host component '
-                      'state is %s' % actualState)
-
-    hostState = self.getHostSatus(new_hostname)
-    if hostState not in ALLOWED_HOST_STATUS_FOR_RELOCATE:
-      raise Exception('Aborting relocate action since host state is %s' % hostState)
-
-    pass
-
-  def getHostSatus(self, hostname):
-    hostStateUrl = getUrl(GET_HOST_STATE_URL.format(CLUSTER_NAME, hostname))
-
-    logger.info("Requesting host status: %s " % hostStateUrl)
-    urlResponse = self.urlOpener.open(hostStateUrl)
-    state = None
-
-    if urlResponse:
-      response = urlResponse.read()
-      data = json.loads(response)
-      logger.debug('Response from getHostSatus: %s' % data)
-      if data:
-        try:
-          hostsInfo = data.get('Hosts')
-          if not hostsInfo:
-            raise Exception('Cannot find host state for host: {1}'.format(hostname))
-
-          state = hostsInfo.get('host_state')
-        except Exception, e:
-          logger.error('Unable to parse json data. %s' % data)
-          raise e
-        pass
-
-      else:
-        logger.error("Unable to retrieve host state.")
-      pass
-
-    return state
-
-
-  def getHostComponentState(self, hostname, componentName):
-    hostStatusUrl = getUrl(GET_HOST_COMPONENT_STATE_URI.format(CLUSTER_NAME,
-                                hostname, componentName))
-
-    logger.info("Requesting host component state: %s " % hostStatusUrl)
-    urlResponse = self.urlOpener.open(hostStatusUrl)
-    state = None
-
-    if urlResponse:
-      response = urlResponse.read()
-      data = json.loads(response)
-      logger.debug('Response from getHostComponentState: %s' % data)
-      if data:
-        try:
-          hostRoles = data.get('HostRoles')
-          if not hostRoles:
-            raise Exception('Cannot find host component state for component: ' +\
-                            '{0}, host: {1}'.format(componentName, hostname))
-
-          state = hostRoles.get('state')
-        except Exception, e:
-          logger.error('Unable to parse json data. %s' % data)
-          raise e
-        pass
-
-      else:
-        logger.error("Unable to retrieve host component desired state.")
-      pass
-
-    return state
-
-
-  # Log response for PUT, POST or DELETE
-  def logResponse(self, text=None, response=None):
-    if response is not None:
-      resp = str(response.getcode())
-      if text is None:
-        text = 'Logging response from server: '
-      if resp is not None:
-        logger.info(text + resp)
-
-  def findClusterName(self):
-    clusterUrl = getUrl(GET_CLUSTERS_URI)
-    clusterName = None
-
-    logger.info("Requesting clusters: " + clusterUrl)
-    urlResponse = self.urlOpener.open(clusterUrl)
-    if urlResponse is not None:
-      response = urlResponse.read()
-      data = json.loads(response)
-      logger.debug('Response from findClusterName: %s' % data)
-      if data:
-        try:
-          clusters = data.get('items')
-          if len(clusters) > 1:
-            raise Exception('Multiple clusters found. %s' % clusters)
-
-          clusterName = clusters[0].get('Clusters').get('cluster_name')
-        except Exception, e:
-          logger.error('Unable to parse json data. %s' % data)
-          raise e
-        pass
-      else:
-        logger.error("Unable to retrieve clusters data.")
-      pass
-
-    return clusterName
-
-  def getHostname(self):
-    hostsUrl = getUrl(GET_HOST_COMPONENTS_URI.format(CLUSTER_NAME,
-                  self.serviveName, self.componentName))
-
-    logger.info("Requesting host info: " + hostsUrl)
-    urlResponse = self.urlOpener.open(hostsUrl)
-    hostname = None
-
-    if urlResponse is not None:
-      response = urlResponse.read()
-      data = json.loads(response)
-      logger.debug('Response from getHostname: %s' % data)
-      if data:
-        try:
-          hostRoles = data.get('host_components')
-          if not hostRoles:
-            raise Exception('Cannot find host component data for service: ' +\
-                            '{0}, component: {1}'.format(self.serviveName, self.componentName))
-          if len(hostRoles) > 1:
-            raise Exception('More than one hosts found with the same role')
-
-          hostname = hostRoles[0].get('HostRoles').get('host_name')
-        except Exception, e:
-          logger.error('Unable to parse json data. %s' % data)
-          raise e
-        pass
-
-      else:
-        logger.error("Unable to retrieve host component data.")
-      pass
-
-    return hostname
-
-
-def getUrl(partial_url):
-  return PROTOCOL + "://" + AMBARI_HOSTNAME + ":" + AMBARI_PORT + partial_url
-
-def get_supported_actions():
-  return [ RELOCATE_ACTION ]
-
-#
-# Main.
-#
-def main():
-  tempDir = tempfile.gettempdir()
-  outputFile = os.path.join(tempDir, "ambari_reinstall_probe.out")
-
-  parser = optparse.OptionParser(usage="usage: %prog [options]")
-  parser.set_description('This python program is a Ambari thin client and '
-                         'supports relocation of ambari host components on '
-                         'Ambari managed clusters.')
-
-  parser.add_option("-v", "--verbose", dest="verbose", action="store_false",
-                  default=False, help="output verbosity.")
-  parser.add_option("-s", "--host", dest="server_hostname",
-                  help="Ambari server host name.")
-  parser.add_option("-p", "--port", dest="server_port",
-                  default="8080" ,help="Ambari server port. [default: 8080]")
-  parser.add_option("-r", "--protocol", dest="protocol", default = "http",
-                  help="Protocol for communicating with Ambari server ("
-                       "http/https) [default: http].")
-  parser.add_option("-c", "--cluster-name", dest="cluster_name",
-                  help="Ambari cluster to operate on.")
-  parser.add_option("-e", "--service-name", dest="service_name",
-                  help="Ambari Service to which the component belongs to.")
-  parser.add_option("-m", "--component-name", dest="component_name",
-                  help="Ambari Service Component to operate on.")
-  parser.add_option("-n", "--new-host", dest="new_hostname",
-                  help="New host to relocate the component to.")
-  parser.add_option("-a", "--action", dest="action", default = "relocate",
-                  help="Script action. [default: relocate]")
-  parser.add_option("-o", "--output-file", dest="outputfile",
-                  default = outputFile, metavar="FILE",
-                  help="Output file. [default: %s]" % outputFile)
-  parser.add_option("-u", "--username", dest="username",
-                  default="admin" ,help="Ambari server admin user. [default: admin]")
-  parser.add_option("-w", "--password", dest="password",
-                  default="admin" ,help="Ambari server admin password.")
-  parser.add_option("-d", "--start-component", dest="start_component",
-                  action="store_false", default=False,
-                  help="Should the script start the component after relocate.")
-
-  (options, args) = parser.parse_args()
-
-  # set verbose
-  if options.verbose:
-    logging.basicConfig(level=logging.DEBUG)
-  else:
-    logging.basicConfig(level=logging.INFO)
-
-  global AMBARI_HOSTNAME
-  AMBARI_HOSTNAME = options.server_hostname
-
-  global AMBARI_PORT
-  AMBARI_PORT = options.server_port
-
-  global CLUSTER_NAME
-  CLUSTER_NAME = options.cluster_name
-
-  global PROTOCOL
-  PROTOCOL = options.protocol
-
-  global USERNAME
-  USERNAME = options.username
-
-  global PASSWORD
-  PASSWORD = options.password
-
-  global START_ON_RELOCATE
-  START_ON_RELOCATE = options.start_component
-
-  global logger
-  logger = logging.getLogger('AmbariProbe')
-  handler = logging.FileHandler(options.outputfile)
-  formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
-  handler.setFormatter(formatter)
-  logger.addHandler(handler)
-
-  action = RELOCATE_ACTION
-
-  if options.action is not None:
-    if options.action not in get_supported_actions():
-      logger.error("Unsupported action: " + options.action + ", "
-                  "valid actions: " + str(get_supported_actions()))
-      sys.exit(1)
-    else:
-      action = options.action
-
-  socket.setdefaulttimeout(DEFAULT_TIMEOUT)
-
-  ambariResource = AmbariResource(serviceName=options.service_name,
-                                  componentName=options.component_name)
-  ambariResource.initializeResource()
-
-  if action == RELOCATE_ACTION:
-    if options.new_hostname is not None:
-      ambariResource.relocate(options.new_hostname)
-
-if __name__ == "__main__":
-  try:
-    main()
-  except (KeyboardInterrupt, EOFError):
-    print("\nAborting ... Keyboard Interrupt.")
-    sys.exit(1)


[3/8] ambari git commit: AMBARI-20906. Made the view directory watcher service optional by introducing the views.directory.watcher.disable property in ambari.properties (nitirajrathore)

Posted by rl...@apache.org.
AMBARI-20906. Made the view directory watcher service optional by introducing the views.directory.watcher.disable property in ambari.properties (nitirajrathore)
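
The watcher remains enabled by default (the new property defaults to "false"), and AmbariServer now simply skips viewDirectoryWatcher.start() when the flag is set. To opt out, an operator would add the property to ambari.properties, for example:

    # ambari.properties (typically /etc/ambari-server/conf/ambari.properties)
    views.directory.watcher.disable=true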


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/26d28efa
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/26d28efa
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/26d28efa

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 26d28efabecf225c255d6850b266ba2b22a5c6a3
Parents: aef6026
Author: Nitiraj Singh Rathore <ni...@gmail.com>
Authored: Tue May 2 14:24:55 2017 +0530
Committer: Nitiraj Singh Rathore <ni...@gmail.com>
Committed: Tue May 2 14:24:55 2017 +0530

----------------------------------------------------------------------
 .../ambari/server/configuration/Configuration.java  | 16 ++++++++++++++++
 .../ambari/server/controller/AmbariServer.java      |  6 +++++-
 .../org/apache/ambari/server/view/ViewRegistry.java |  2 +-
 3 files changed, 22 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/26d28efa/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index 55a4f50..ff20834 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@ -364,6 +364,13 @@ public class Configuration {
       "views.validate", "false");
 
   /**
+   * Determines whether the view directory watcher service should be disabled.
+   */
+  @Markdown(description = "Determines whether the view directory watcher service should be disabled.")
+  public static final ConfigurationProperty<String> DISABLE_VIEW_DIRECTORY_WATCHER = new ConfigurationProperty<>(
+      "views.directory.watcher.disable", "false");
+
+  /**
    * Determines whether remove undeployed views from the Ambari database.
    */
   @Markdown(description = "Determines whether remove undeployed views from the Ambari database.")
@@ -3296,6 +3303,15 @@ public class Configuration {
   }
 
   /**
+   * Determines whether the view directory watcher service should be disabled
+   *
+   * @return true view directory watcher service should be disabled
+   */
+  public boolean isViewDirectoryWatcherServiceDisabled() {
+    return Boolean.parseBoolean(getProperty(DISABLE_VIEW_DIRECTORY_WATCHER));
+  }
+
+  /**
    * @return conventional Java version number, e.g. 7.
    * Integer is used here to simplify comparisons during usage.
    * If java version is not supported, returns -1

http://git-wip-us.apache.org/repos/asf/ambari/blob/26d28efa/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
index f80d2dc..bc6b428 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
@@ -455,7 +455,6 @@ public class AmbariServer {
       SecurityContextHolder.setStrategyName(SecurityContextHolder.MODE_INHERITABLETHREADLOCAL);
 
       viewRegistry.readViewArchives();
-      viewDirectoryWatcher.start();
 
       //Check and load requestlog handler.
       loadRequestlogHandler(handlerList, serverForAgent, configsMap);
@@ -563,6 +562,11 @@ public class AmbariServer {
       serverForAgent.start();
       LOG.info("********* Started Server **********");
 
+      if( !configs.isViewDirectoryWatcherServiceDisabled()) {
+        LOG.info("Starting View Directory Watcher");
+        viewDirectoryWatcher.start();
+      }
+
       manager.start();
       LOG.info("********* Started ActionManager **********");
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/26d28efa/ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java b/ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java
index c7b2f79..6d16327 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java
@@ -1811,7 +1811,7 @@ public class ViewRegistry {
   }
 
   // read a view archive
-  private void readViewArchive(ViewEntity viewDefinition,
+  private synchronized void readViewArchive(ViewEntity viewDefinition,
                                File archiveFile,
                                File extractedArchiveDirFile,
                                String serverVersion) {


[4/8] ambari git commit: AMBARI-20912. Unable to view visual explain details in Firefox browser (pallavkul)

Posted by rl...@apache.org.
AMBARI-20912. Unable to view visual explain details in Firefox browser (pallavkul)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/13a981c1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/13a981c1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/13a981c1

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 13a981c1a32e788c41657c9d2e0089cc0b86986c
Parents: 26d28ef
Author: pallavkul <pa...@gmail.com>
Authored: Tue May 2 17:05:50 2017 +0530
Committer: pallavkul <pa...@gmail.com>
Committed: Tue May 2 17:05:50 2017 +0530

----------------------------------------------------------------------
 .../src/main/resources/ui/app/utils/hive-explainer/renderer.js   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/13a981c1/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/renderer.js
----------------------------------------------------------------------
diff --git a/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/renderer.js b/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/renderer.js
index 3a23226..c880614 100644
--- a/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/renderer.js
+++ b/contrib/views/hive20/src/main/resources/ui/app/utils/hive-explainer/renderer.js
@@ -75,11 +75,11 @@ export default function doRender(data, selector, onRequestDetail, draggable) {
 
   const drag = d3.behavior.drag()
     .on("dragstart", (event) => {
-      let evt = window.event || event;
-      currentTransform = d3.transform(evt.currentTarget.firstElementChild.getAttribute('transform'));
       draggable.set('dragstart', true);
       draggable.set('zoom',false);
 
+      let evt = window.event || event;
+      currentTransform = d3.transform(evt.currentTarget.firstElementChild.getAttribute('transform'));
     })
     .on("dragend", () => {
       draggable.set('dragend', true);


[2/8] ambari git commit: AMBARI-20865. Remove redundant whitespace in Hadoop 3.0 configs (alejandro)

Posted by rl...@apache.org.
AMBARI-20865. Remove redundant whitespace in Hadoop 3.0 configs (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/aef60264
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/aef60264
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/aef60264

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: aef60264f105a3b060a91dea1d637638384f0289
Parents: f167236
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Wed Apr 26 14:04:44 2017 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Mon May 1 15:04:14 2017 -0700

----------------------------------------------------------------------
 .../HDFS/3.0.0.3.0/configuration/hadoop-env.xml | 200 +++++-----
 .../HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml | 382 +++++++++----------
 .../HIVE/2.1.0.3.0/configuration/hcat-env.xml   |  48 +--
 .../HIVE/2.1.0.3.0/configuration/hive-env.xml   |  78 ++--
 .../configuration/hive-interactive-env.xml      |  63 ++-
 .../YARN/3.0.0.3.0/configuration/yarn-env.xml   | 206 +++++-----
 .../YARN/3.0.0.3.0/configuration/yarn-log4j.xml | 126 +++---
 .../YARN/3.0.0.3.0/configuration/yarn-site.xml  |   7 +-
 .../3.4.5/configuration/zookeeper-log4j.xml     |   2 +-
 9 files changed, 555 insertions(+), 557 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/aef60264/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml
index e447c52..e292e6e 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hadoop-env.xml
@@ -269,143 +269,143 @@
     <display-name>hadoop-env template</display-name>
     <description>This is the jinja template for hadoop-env.sh file</description>
     <value>
-      # Set Hadoop-specific environment variables here.
+# Set Hadoop-specific environment variables here.
 
-      # The only required environment variable is JAVA_HOME.  All others are
-      # optional.  When running a distributed configuration it is best to
-      # set JAVA_HOME in this file, so that it is correctly defined on
-      # remote nodes.
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
 
-      # The java implementation to use.  Required.
-      export JAVA_HOME={{java_home}}
-      export HADOOP_HOME_WARN_SUPPRESS=1
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
 
-      # Hadoop home directory
-      export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
 
-      # Hadoop Configuration Directory
-      #TODO: if env var set that can cause problems
-      export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
 
 
-      # Path to jsvc required by secure datanode
-      export JSVC_HOME={{jsvc_path}}
+# Path to jsvc required by secure datanode
+export JSVC_HOME={{jsvc_path}}
 
 
-      # The maximum amount of heap to use, in MB. Default is 1000.
-      if [[ ("$SERVICE" = "hiveserver2") || ("$SERVICE" = "metastore") || ( "$SERVICE" = "cli") ]]; then
-      if [ "$HADOOP_HEAPSIZE" = "" ]; then
-      export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
-      fi
-      else
-      export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
-      fi
+# The maximum amount of heap to use, in MB. Default is 1000.
+if [[ ("$SERVICE" = "hiveserver2") || ("$SERVICE" = "metastore") || ( "$SERVICE" = "cli") ]]; then
+if [ "$HADOOP_HEAPSIZE" = "" ]; then
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+fi
+else
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+fi
 
 
-      export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
 
-      # Extra Java runtime options.  Empty by default.
-      export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
 
-      # Command specific options appended to HADOOP_OPTS when specified
+# Command specific options appended to HADOOP_OPTS when specified
 
-      {% if java_version &lt; 8 %}
-      export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1  ${HADOOP_NAMENODE_OPTS}"
-      export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+{% if java_version &lt; 8 %}
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1  ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
 
-      # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-      export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
-      {% else %}
-      export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
-      export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
+{% else %}
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
 
-      # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-      export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-      {% endif %}
-      HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+{% endif %}
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
 
-      HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
-      HADOOP_DATANODE_OPTS="-XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:ConcGCThreads=4 -XX:+UseConcMarkSweepGC -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
-      HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:ConcGCThreads=4 -XX:+UseConcMarkSweepGC -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
 
-      # On secure datanodes, user to run the datanode as after dropping privileges
-      export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
 
-      # Extra ssh options.  Empty by default.
-      export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
 
-      # Where log files are stored.  $HADOOP_HOME/logs by default.
-      export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
 
-      # History server logs
-      export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
 
-      # Where log files are stored in the secure data environment.
-      export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
 
-      # File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-      # export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
 
-      # host:path where hadoop code should be rsync'd from.  Unset by default.
-      # export HADOOP_MASTER=master:/home/$USER/src/hadoop
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
 
-      # Seconds to sleep between slave commands.  Unset by default.  This
-      # can be useful in large clusters, where, e.g., slave rsyncs can
-      # otherwise arrive faster than the master can service them.
-      # export HADOOP_SLAVE_SLEEP=0.1
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
 
-      # The directory where pid files are stored. /tmp by default.
-      export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
-      export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
 
-      # History server pid
-      export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
 
-      YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY -Drm.audit.logger=INFO,RMAUDIT"
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY -Drm.audit.logger=INFO,RMAUDIT"
 
-      # A string representing this instance of hadoop. $USER by default.
-      export HADOOP_IDENT_STRING=$USER
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
 
-      # The scheduling priority for daemon processes.  See 'man nice'.
+# The scheduling priority for daemon processes.  See 'man nice'.
 
-      # export HADOOP_NICENESS=10
+# export HADOOP_NICENESS=10
 
-      # Add database libraries
-      JAVA_JDBC_LIBS=""
-      if [ -d "/usr/share/java" ]; then
-      for jarFile in `ls /usr/share/java | grep -E "(mysql|ojdbc|postgresql|sqljdbc)" 2&gt;/dev/null`
-      do
-      JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-      done
-      fi
+# Add database libraries
+JAVA_JDBC_LIBS=""
+if [ -d "/usr/share/java" ]; then
+for jarFile in `ls /usr/share/java | grep -E "(mysql|ojdbc|postgresql|sqljdbc)" 2&gt;/dev/null`
+do
+JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+fi
 
-      # Add libraries required by nodemanager
-      MAPREDUCE_LIBS={{mapreduce_libs_path}}
+# Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
 
-      # Add libraries to the hadoop classpath - some may not need a colon as they already include it
-      export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+# Add libraries to the hadoop classpath - some may not need a colon as they already include it
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
 
-      if [ -d "/usr/lib/tez" ]; then
-      export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
-      fi
+if [ -d "/usr/lib/tez" ]; then
+export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
 
-      # Setting path to hdfs command line
-      export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
 
-      #Mostly required for hadoop 2.0
-      export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-{{architecture}}-64
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-{{architecture}}-64
 
-      {% if is_datanode_max_locked_memory_set %}
-      # Fix temporary bug, when ulimit from conf files is not picked up, without full relogin.
-      # Makes sense to fix only when running DN as root
-      if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$HADOOP_SECURE_DN_USER" ]; then
-      ulimit -l {{datanode_max_locked_memory}}
-      fi
-      {% endif %}
-      # Enable ACLs on zookeeper znodes if required
-      {% if hadoop_zkfc_opts is defined %}
-      export HADOOP_ZKFC_OPTS="{{hadoop_zkfc_opts}} $HADOOP_ZKFC_OPTS"
-      {% endif %}
+{% if is_datanode_max_locked_memory_set %}
+# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin.
+# Makes sense to fix only when running DN as root
+if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$HADOOP_SECURE_DN_USER" ]; then
+ulimit -l {{datanode_max_locked_memory}}
+fi
+{% endif %}
+# Enable ACLs on zookeeper znodes if required
+{% if hadoop_zkfc_opts is defined %}
+export HADOOP_ZKFC_OPTS="{{hadoop_zkfc_opts}} $HADOOP_ZKFC_OPTS"
+{% endif %}
     </value>
     <value-attributes>
       <type>content</type>
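For context, these env blocks are Jinja2 templates that Ambari renders before writing out hadoop-env.sh, so any leading indentation in the template body is carried verbatim into the generated script, which is what the re-indentation above addresses. A minimal rendering sketch, with made-up parameter values, illustrates the substitution:

    # Minimal sketch of how a fragment of the template above is rendered.
    # The parameter values are assumptions for illustration only.
    from jinja2 import Template

    fragment = (
        "export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n"
        "export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\n"
    )
    params = {
        "hdfs_log_dir_prefix": "/var/log/hadoop",    # assumed value
        "hadoop_pid_dir_prefix": "/var/run/hadoop",  # assumed value
    }

    print(Template(fragment).render(**params))
    # export HADOOP_LOG_DIR=/var/log/hadoop/$USER
    # export HADOOP_PID_DIR=/var/run/hadoop/$USER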

http://git-wip-us.apache.org/repos/asf/ambari/blob/aef60264/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml
index 5f6ec3f..f529494 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/configuration/hdfs-log4j.xml
@@ -26,197 +26,197 @@
     <display-name>hdfs-log4j template</display-name>
     <description>Custom log4j.properties</description>
     <value>
-      #
-      # Licensed to the Apache Software Foundation (ASF) under one
-      # or more contributor license agreements.  See the NOTICE file
-      # distributed with this work for additional information
-      # regarding copyright ownership.  The ASF licenses this file
-      # to you under the Apache License, Version 2.0 (the
-      # "License"); you may not use this file except in compliance
-      # with the License.  You may obtain a copy of the License at
-      #
-      #  http://www.apache.org/licenses/LICENSE-2.0
-      #
-      # Unless required by applicable law or agreed to in writing,
-      # software distributed under the License is distributed on an
-      # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-      # KIND, either express or implied.  See the License for the
-      # specific language governing permissions and limitations
-      # under the License.
-      #
-
-
-      # Define some default values that can be overridden by system properties
-      # To change daemon root logger use hadoop_root_logger in hadoop-env
-      hadoop.root.logger=INFO,console
-      hadoop.log.dir=.
-      hadoop.log.file=hadoop.log
-
-
-      # Define the root logger to the system property "hadoop.root.logger".
-      log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-      # Logging Threshold
-      log4j.threshhold=ALL
-
-      #
-      # Daily Rolling File Appender
-      #
-
-      log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-      log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-      # Rollover at midnight
-      log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-      # 30-day backup
-      #log4j.appender.DRFA.MaxBackupIndex=30
-      log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-      # Pattern format: Date LogLevel LoggerName LogMessage
-      log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-      # Debugging Pattern format
-      #log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-      #
-      # console
-      # Add "console" to rootlogger above if you want to use this
-      #
-
-      log4j.appender.console=org.apache.log4j.ConsoleAppender
-      log4j.appender.console.target=System.err
-      log4j.appender.console.layout=org.apache.log4j.PatternLayout
-      log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-      #
-      # TaskLog Appender
-      #
-
-      #Default values
-      hadoop.tasklog.taskid=null
-      hadoop.tasklog.iscleanup=false
-      hadoop.tasklog.noKeepSplits=4
-      hadoop.tasklog.totalLogFileSize=100
-      hadoop.tasklog.purgeLogSplits=true
-      hadoop.tasklog.logsRetainHours=12
-
-      log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-      log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-      log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-      log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-      log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-      log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-      #
-      #Security audit appender
-      #
-      hadoop.security.logger=INFO,console
-      hadoop.security.log.maxfilesize=256MB
-      hadoop.security.log.maxbackupindex=20
-      log4j.category.SecurityLogger=${hadoop.security.logger}
-      hadoop.security.log.file=SecurityAuth.audit
-      log4j.additivity.SecurityLogger=false
-      log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
-      log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-      log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
-      log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-      log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
-      log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
-      log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-      log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-      log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-      log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
-      log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
-
-      #
-      # hdfs audit logging
-      #
-      hdfs.audit.logger=INFO,console
-      log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
-      log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-      log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
-      log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-      log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
-      log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-      log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
-
-      #
-      # NameNode metrics logging.
-      # The default is to retain two namenode-metrics.log files up to 64MB each.
-      #
-      namenode.metrics.logger=INFO,NullAppender
-      log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
-      log4j.additivity.NameNodeMetricsLog=false
-      log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
-      log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
-      log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
-      log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
-      log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
-      log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
-
-      #
-      # mapred audit logging
-      #
-      mapred.audit.logger=INFO,console
-      log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
-      log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-      log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
-      log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
-      log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
-      log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-      log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
-
-      #
-      # Rolling File Appender
-      #
-
-      log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-      log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-      # Logfile size and 30-day backups
-      log4j.appender.RFA.MaxFileSize=256MB
-      log4j.appender.RFA.MaxBackupIndex=10
-
-      log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-      log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-      log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-      # Custom Logging levels
-
-      hadoop.metrics.log.level=INFO
-      #log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-      #log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-      #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-      log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-      # Jets3t library
-      log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-      #
-      # Null Appender
-      # Trap security logger on the hadoop client side
-      #
-      log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-      #
-      # Event Counter Appender
-      # Sends counts of logging messages at different severity levels to Hadoop Metrics.
-      #
-      log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
-      # Removes "deprecated" messages
-      log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
-
-      #
-      # HDFS block state change log from block manager
-      #
-      # Uncomment the following to suppress normal block state change
-      # messages from BlockManager in NameNode.
-      #log4j.logger.BlockStateChange=WARN
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# Define some default values that can be overridden by system properties
+# To change daemon root logger use hadoop_root_logger in hadoop-env
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.additivity.SecurityLogger=false
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# NameNode metrics logging.
+# The default is to retain two namenode-metrics.log files up to 64MB each.
+#
+namenode.metrics.logger=INFO,NullAppender
+log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
+log4j.additivity.NameNodeMetricsLog=false
+log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
+log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
+log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
+log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
+log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+log4j.appender.RFA.MaxFileSize=256MB
+log4j.appender.RFA.MaxBackupIndex=10
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+
+#
+# HDFS block state change log from block manager
+#
+# Uncomment the following to suppress normal block state change
+# messages from BlockManager in NameNode.
+#log4j.logger.BlockStateChange=WARN
     </value>
     <value-attributes>
       <type>content</type>
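A quick way to sanity-check the rendered log4j.properties on a host is to list the file-backed appenders it defines; a small sketch follows (the config path is an assumption, adjust it to your layout):

    # Sketch: list the file-backed appenders in a rendered log4j.properties.
    import re

    conf_path = "/etc/hadoop/conf/log4j.properties"   # assumed location of the rendered file

    appender_files = {}
    with open(conf_path) as fh:
        for line in fh:
            m = re.match(r"log4j\.appender\.(\w+)\.File=(.*)", line.strip())
            if m:
                appender_files[m.group(1)] = m.group(2)

    # With the template above this should list DRFA, DRFAS, RFAS, DRFAAUDIT,
    # NNMETRICSRFA, MRAUDIT and RFA together with their target log files.
    for name, target in sorted(appender_files.items()):
        print(name, "->", target)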

http://git-wip-us.apache.org/repos/asf/ambari/blob/aef60264/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hcat-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hcat-env.xml b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hcat-env.xml
index 964abdb..1244979 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hcat-env.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hcat-env.xml
@@ -26,31 +26,31 @@
     <display-name>hcat-env template</display-name>
     <description>This is the jinja template for hcat-env.sh file</description>
     <value>
-      # Licensed to the Apache Software Foundation (ASF) under one
-      # or more contributor license agreements. See the NOTICE file
-      # distributed with this work for additional information
-      # regarding copyright ownership. The ASF licenses this file
-      # to you under the Apache License, Version 2.0 (the
-      # "License"); you may not use this file except in compliance
-      # with the License. You may obtain a copy of the License at
-      #
-      # http://www.apache.org/licenses/LICENSE-2.0
-      #
-      # Unless required by applicable law or agreed to in writing, software
-      # distributed under the License is distributed on an "AS IS" BASIS,
-      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-      # See the License for the specific language governing permissions and
-      # limitations under the License.
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
-      JAVA_HOME={{java64_home}}
-      HCAT_PID_DIR={{hcat_pid_dir}}/
-      HCAT_LOG_DIR={{hcat_log_dir}}/
-      HCAT_CONF_DIR={{hcat_conf_dir}}
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-      #DBROOT is the path where the connector jars are downloaded
-      DBROOT={{hcat_dbroot}}
-      USER={{webhcat_user}}
-      METASTORE_PORT={{hive_metastore_port}}
+JAVA_HOME={{java64_home}}
+HCAT_PID_DIR={{hcat_pid_dir}}/
+HCAT_LOG_DIR={{hcat_log_dir}}/
+HCAT_CONF_DIR={{hcat_conf_dir}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+#DBROOT is the path where the connector jars are downloaded
+DBROOT={{hcat_dbroot}}
+USER={{webhcat_user}}
+METASTORE_PORT={{hive_metastore_port}}
     </value>
     <value-attributes>
       <type>content</type>

http://git-wip-us.apache.org/repos/asf/ambari/blob/aef60264/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-env.xml b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-env.xml
index 4ed26f7..3cef34b 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-env.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-env.xml
@@ -436,56 +436,56 @@
     <display-name>hive-env template</display-name>
     <description>This is the jinja template for hive-env.sh file</description>
     <value>
-      export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB
-      if [ "$SERVICE" = "cli" ]; then
-      if [ -z "$DEBUG" ]; then
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit"
-      else
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-      fi
-      fi
+export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB
+if [ "$SERVICE" = "cli" ]; then
+if [ -z "$DEBUG" ]; then
+export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit"
+else
+export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+fi
+fi
 
-      # The heap size of the jvm started by hive shell script can be controlled via:
+# The heap size of the jvm started by hive shell script can be controlled via:
 
-      if [ "$SERVICE" = "metastore" ]; then
-      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
-      else
-      export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client
-      fi
+if [ "$SERVICE" = "metastore" ]; then
+export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
+else
+export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client
+fi
 
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
 
-      # Larger heap size may be required when running queries over large number of files or partitions.
-      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-      # appropriate for hive server (hwi etc).
+# Larger heap size may be required when running queries over large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
 
 
-      # Set HADOOP_HOME to point to a specific hadoop install directory
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
 
-      export HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}}
+export HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}}
 
-      # Hive Configuration Directory can be controlled by:
-      export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}}
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}}
 
-      # Folder containing extra libraries required for hive compilation/execution can be controlled by:
-      if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-        if [ -f "${HIVE_AUX_JARS_PATH}" ]; then
-          export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
-        elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
-          export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
-        fi
-      elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
-        export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
-      fi
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  if [ -f "${HIVE_AUX_JARS_PATH}" ]; then
+    export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+  elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
+    export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
+  fi
+elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
+  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
+fi
 
-      export METASTORE_PORT={{hive_metastore_port}}
+export METASTORE_PORT={{hive_metastore_port}}
 
-      {% if sqla_db_used or lib_dir_available %}
-      export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}"
-      export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}"
-      {% endif %}
+{% if sqla_db_used or lib_dir_available %}
+export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}"
+export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}"
+{% endif %}
     </value>
     <value-attributes>
       <type>content</type>

http://git-wip-us.apache.org/repos/asf/ambari/blob/aef60264/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-interactive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-interactive-env.xml b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-interactive-env.xml
index e2048a2..940fc79 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-interactive-env.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-interactive-env.xml
@@ -281,48 +281,47 @@
     <display-name>hive-interactive-env template</display-name>
     <description>This is the jinja template for hive-env.sh file</description>
     <value>
-      if [ "$SERVICE" = "cli" ]; then
-      if [ -z "$DEBUG" ]; then
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-      else
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-      fi
-      fi
+if [ "$SERVICE" = "cli" ]; then
+if [ -z "$DEBUG" ]; then
+export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+else
+export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+fi
+fi
 
-      # The heap size of the jvm started by hive shell script can be controlled via:
+# The heap size of the jvm started by hive shell script can be controlled via:
 
-      if [ "$SERVICE" = "metastore" ]; then
-      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
-      else
-      export HADOOP_HEAPSIZE={{hive_interactive_heapsize}} # Setting for HiveServer2 and Client
-      fi
+if [ "$SERVICE" = "metastore" ]; then
+export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
+else
+export HADOOP_HEAPSIZE={{hive_interactive_heapsize}} # Setting for HiveServer2 and Client
+fi
 
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
 
-      # Larger heap size may be required when running queries over large number of files or partitions.
-      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-      # appropriate for hive server (hwi etc).
+# Larger heap size may be required when running queries over large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
 
 
-      # Set HADOOP_HOME to point to a specific hadoop install directory
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
 
-      # Hive Configuration Directory can be controlled by:
-      export HIVE_CONF_DIR={{hive_server_interactive_conf_dir}}
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{hive_server_interactive_conf_dir}}
 
-      # Add additional hcatalog jars
-      if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-        export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
-      else
-        export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-server2-hive2/lib/hive-hcatalog-core.jar
-      fi
+# Add additional hcatalog jars
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+else
+  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-server2-hive2/lib/hive-hcatalog-core.jar
+fi
 
-      export METASTORE_PORT={{hive_metastore_port}}
-
-      # Spark assembly contains a conflicting copy of HiveConf from hive-1.2
-      export HIVE_SKIP_SPARK_ASSEMBLY=true
+export METASTORE_PORT={{hive_metastore_port}}
 
+# Spark assembly contains a conflicting copy of HiveConf from hive-1.2
+export HIVE_SKIP_SPARK_ASSEMBLY=true
     </value>
     <value-attributes>
       <type>content</type>

http://git-wip-us.apache.org/repos/asf/ambari/blob/aef60264/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml
index 5fb4732..6a52865 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-env.xml
@@ -161,131 +161,131 @@
     <display-name>yarn-env template</display-name>
     <description>This is the jinja template for yarn-env.sh file</description>
     <value>
-      export HADOOP_YARN_HOME={{hadoop_yarn_home}}
-      export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
-      export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
-      export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-      export JAVA_HOME={{java64_home}}
-      export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
+export HADOOP_YARN_HOME={{hadoop_yarn_home}}
+export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
+export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+export JAVA_HOME={{java64_home}}
+export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}"
 
-      # We need to add the EWMA appender for the yarn daemons only;
-      # however, YARN_ROOT_LOGGER is shared by the yarn client and the
-      # daemons. This is to restrict the EWMA appender to daemons only.
-      INVOKER="${0##*/}"
-      if [ "$INVOKER" == "yarn-daemon.sh" ]; then
-      export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,EWMA,RFA}
-      fi
+# We need to add the EWMA appender for the yarn daemons only;
+# however, YARN_ROOT_LOGGER is shared by the yarn client and the
+# daemons. This is to restrict the EWMA appender to daemons only.
+INVOKER="${0##*/}"
+if [ "$INVOKER" == "yarn-daemon.sh" ]; then
+export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,EWMA,RFA}
+fi
 
-      # User for YARN daemons
-      export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+# User for YARN daemons
+export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
 
-      # resolve links - $0 may be a softlink
-      export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
+# resolve links - $0 may be a softlink
+export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
 
-      # some Java parameters
-      # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
-      if [ "$JAVA_HOME" != "" ]; then
-      #echo "run java in $JAVA_HOME"
-      JAVA_HOME=$JAVA_HOME
-      fi
+# some Java parameters
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+if [ "$JAVA_HOME" != "" ]; then
+#echo "run java in $JAVA_HOME"
+JAVA_HOME=$JAVA_HOME
+fi
 
-      if [ "$JAVA_HOME" = "" ]; then
-      echo "Error: JAVA_HOME is not set."
-      exit 1
-      fi
+if [ "$JAVA_HOME" = "" ]; then
+echo "Error: JAVA_HOME is not set."
+exit 1
+fi
 
-      JAVA=$JAVA_HOME/bin/java
-      JAVA_HEAP_MAX=-Xmx1000m
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
 
-      # For setting YARN specific HEAP sizes please use this
-      # Parameter and set appropriately
-      YARN_HEAPSIZE={{yarn_heapsize}}
+# For setting YARN specific HEAP sizes please use this
+# Parameter and set appropriately
+YARN_HEAPSIZE={{yarn_heapsize}}
 
-      # check envvars which might override default args
-      if [ "$YARN_HEAPSIZE" != "" ]; then
-      JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
-      fi
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+fi
 
-      # Resource Manager specific parameters
+# Resource Manager specific parameters
 
-      # Specify the max Heapsize for the ResourceManager using a numerical value
-      # in the scale of MB. For example, to specify a jvm option of -Xmx1000m, set
-      # the value to 1000.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_RESOURCEMANAGER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
+# Specify the max Heapsize for the ResourceManager using a numerical value
+# in the scale of MB. For example, to specify a jvm option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_RESOURCEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
 
-      # Specify the JVM options to be used when starting the ResourceManager.
-      # These options will be appended to the options specified as YARN_OPTS
-      # and therefore may override any similar flags set in YARN_OPTS
-      #export YARN_RESOURCEMANAGER_OPTS=
+# Specify the JVM options to be used when starting the ResourceManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_RESOURCEMANAGER_OPTS=
 
-      # Node Manager specific parameters
+# Node Manager specific parameters
 
-      # Specify the max Heapsize for the NodeManager using a numerical value
-      # in the scale of MB. For example, to specify a jvm option of -Xmx1000m, set
-      # the value to 1000.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_NODEMANAGER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
+# Specify the max Heapsize for the NodeManager using a numerical value
+# in the scale of MB. For example, to specify a jvm option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_NODEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
 
-      # Specify the max Heapsize for the timeline server using a numerical value
-      # in the scale of MB. For example, to specify a jvm option of -Xmx1000m, set
-      # the value to 1024.
-      # This value will be overridden by an Xmx setting specified in either YARN_OPTS
-      # and/or YARN_TIMELINESERVER_OPTS.
-      # If not specified, the default value will be picked from either YARN_HEAPMAX
-      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-      export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}
+# Specify the max Heapsize for the timeline server using a numerical value
+# in the scale of MB. For example, to specify a jvm option of -Xmx1000m, set
+# the value to 1024.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_TIMELINESERVER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}
 
-      # Specify the JVM options to be used when starting the NodeManager.
-      # These options will be appended to the options specified as YARN_OPTS
-      # and therefore may override any similar flags set in YARN_OPTS
-      #export YARN_NODEMANAGER_OPTS=
+# Specify the JVM options to be used when starting the NodeManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_NODEMANAGER_OPTS=
 
-      # so that filenames w/ spaces are handled correctly in loops below
-      IFS=
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
 
 
-      # default log directory and file
-      if [ "$YARN_LOG_DIR" = "" ]; then
-      YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
-      fi
-      if [ "$YARN_LOGFILE" = "" ]; then
-      YARN_LOGFILE='yarn.log'
-      fi
+# default log directory and file
+if [ "$YARN_LOG_DIR" = "" ]; then
+YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+YARN_LOGFILE='yarn.log'
+fi
 
-      # default policy file for service-level authorization
-      if [ "$YARN_POLICYFILE" = "" ]; then
-      YARN_POLICYFILE="hadoop-policy.xml"
-      fi
+# default policy file for service-level authorization
+if [ "$YARN_POLICYFILE" = "" ]; then
+YARN_POLICYFILE="hadoop-policy.xml"
+fi
 
-      # restore ordinary behaviour
-      unset IFS
+# restore ordinary behaviour
+unset IFS
 
 
-      YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
-      YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
-      YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
-      YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
-      YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
-      YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
-      YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-      YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-      export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT"
-      export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT"
-      if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-      YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
-      fi
-      YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
-      YARN_OPTS="$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}"
-      {% if rm_security_opts is defined %}
-      YARN_OPTS="{{rm_security_opts}} $YARN_OPTS"
-      {% endif %}
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT"
+export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
+YARN_OPTS="$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}"
+{% if rm_security_opts is defined %}
+YARN_OPTS="{{rm_security_opts}} $YARN_OPTS"
+{% endif %}
     </value>
     <value-attributes>
       <type>content</type>

http://git-wip-us.apache.org/repos/asf/ambari/blob/aef60264/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml
index a200e74..dab4516 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-log4j.xml
@@ -25,74 +25,74 @@
     <display-name>yarn-log4j template</display-name>
     <description>Custom log4j.properties</description>
     <value>
-      #Relative to Yarn Log Dir Prefix
-      yarn.log.dir=.
-      #
-      # Job Summary Appender
-      #
-      # Use following logger to send summary to separate file defined by
-      # hadoop.mapreduce.jobsummary.log.file rolled daily:
-      # hadoop.mapreduce.jobsummary.logger=INFO,JSA
-      #
-      hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-      hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-      log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
-      # Set the ResourceManager summary log filename
-      yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
-      # Set the ResourceManager summary log level and appender
-      yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
-      #yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+#Relative to Yarn Log Dir Prefix
+yarn.log.dir=.
+#
+# Job Summary Appender
+#
+# Use following logger to send summary to separate file defined by
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+#
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+# Set the ResourceManager summary log filename
+yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
+# Set the ResourceManager summary log level and appender
+yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
+#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
 
-      # To enable AppSummaryLogging for the RM,
-      # set yarn.server.resourcemanager.appsummary.logger to
-      # LEVEL,RMSUMMARY in hadoop-env.sh
+# To enable AppSummaryLogging for the RM,
+# set yarn.server.resourcemanager.appsummary.logger to
+# LEVEL,RMSUMMARY in hadoop-env.sh
 
-      # Appender for ResourceManager Application Summary Log
-      # Requires the following properties to be set
-      #    - hadoop.log.dir (Hadoop Log directory)
-      #    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
-      #    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
-      log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
-      log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
-      log4j.appender.RMSUMMARY.MaxFileSize=256MB
-      log4j.appender.RMSUMMARY.MaxBackupIndex=20
-      log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
-      log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-      log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-      log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-      log4j.appender.JSA.DatePattern=.yyyy-MM-dd
-      log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-      log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
-      log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+# Appender for ResourceManager Application Summary Log
+# Requires the following properties to be set
+#    - hadoop.log.dir (Hadoop Log directory)
+#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
+log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+log4j.appender.RMSUMMARY.MaxFileSize=256MB
+log4j.appender.RMSUMMARY.MaxBackupIndex=20
+log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.JSA.DatePattern=.yyyy-MM-dd
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
 
-      # Appender for viewing information for errors and warnings
-      yarn.ewma.cleanupInterval=300
-      yarn.ewma.messageAgeLimitSeconds=86400
-      yarn.ewma.maxUniqueMessages=250
-      log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
-      log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
-      log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
-      log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
+# Appender for viewing information for errors and warnings
+yarn.ewma.cleanupInterval=300
+yarn.ewma.messageAgeLimitSeconds=86400
+yarn.ewma.maxUniqueMessages=250
+log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
+log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
+log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
+log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
 
-      # Audit logging for ResourceManager
-      rm.audit.logger=${hadoop.root.logger}
-      log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}
-      log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
-      log4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender
-      log4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log
-      log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
-      log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-      log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd
+# Audit logging for ResourceManager
+rm.audit.logger=${hadoop.root.logger}
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false
+log4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log
+log4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd
 
-      # Audit logging for NodeManager
-      nm.audit.logger=${hadoop.root.logger}
-      log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}
-      log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false
-      log4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender
-      log4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log
-      log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout
-      log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-      log4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd
+# Audit logging for NodeManager
+nm.audit.logger=${hadoop.root.logger}
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false
+log4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log
+log4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd
     </value>
     <value-attributes>
       <type>content</type>

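The yarn-log4j content above wires the ResourceManager application-summary log through the RMSUMMARY appender and the RMAppManager$ApplicationSummary logger category, with additivity switched off so summary lines stay out of the main RM log. A minimal sketch of how such a category behaves, assuming log4j 1.x on the classpath and using the logger name exactly as it appears in the patch (the class name and the sample log line below are illustrative only):

    import org.apache.log4j.Logger;

    public class AppSummaryLoggerSketch {
        // The '$' in the name targets the ApplicationSummary inner class,
        // matching the log4j.logger... key configured in the patch above.
        private static final Logger SUMMARY_LOG = Logger.getLogger(
            "org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary");

        public static void main(String[] args) {
            // With log4j.additivity...=false, this record is handled only by the
            // appender bound to this category (RMSUMMARY), not by the root appenders.
            SUMMARY_LOG.info("appId=application_0000000000000_0001,state=FINISHED");
        }
    }

The ${yarn.server.resourcemanager.appsummary.*} placeholders are resolved from properties defined in the same file or from JVM system properties, which are typically supplied through the ResourceManager's startup options (for example via yarn-env); that wiring is outside this patch.
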
http://git-wip-us.apache.org/repos/asf/ambari/blob/aef60264/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
index cc96cd7..64e0bcb 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
@@ -411,9 +411,10 @@
   </property>
 
   <!-- These configs were inherited from HDP 2.1 -->
+  <!-- TODO, temporarily disable timeline service since failing due to YARN-6534 -->
   <property>
     <name>yarn.timeline-service.enabled</name>
-    <value>true</value>
+    <value>false</value>
     <description>Indicate to clients whether timeline service is enabled or not.
       If enabled, clients will put entities and events to the timeline server.
     </description>
@@ -1033,11 +1034,9 @@ yarn.node-labels.manager-class
   </property>
 
   <!--ats v2.0 properties-->
-
-  <!-- TODO HDP 3.0, set version to 2.0 once ready. -->
   <property>
     <name>yarn.timeline-service.version</name>
-    <value>1.5</value>
+    <value>2.0</value>
     <description>Timeline service version we&#x2019;re currently using.</description>
     <on-ambari-upgrade add="false"/>
   </property>

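The yarn-site.xml hunks above disable the timeline service for now (per the YARN-6534 TODO) while moving the declared service version up to 2.0, so clients should key off the enabled flag rather than the version alone. A hypothetical sketch of reading both flags with the plain Hadoop Configuration API (the key names come from the diff; the class name and defaults are illustrative assumptions, not part of the patch):

    import org.apache.hadoop.conf.Configuration;

    public class TimelineFlagsSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            conf.addResource("yarn-site.xml"); // assumes yarn-site.xml is on the classpath

            boolean enabled = conf.getBoolean("yarn.timeline-service.enabled", false);
            float version = conf.getFloat("yarn.timeline-service.version", 1.0f);

            // With this patch applied: enabled=false, version=2.0, so a client
            // would skip publishing timeline entities despite the v2 setting.
            System.out.println("timeline enabled=" + enabled + ", version=" + version);
        }
    }
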
http://git-wip-us.apache.org/repos/asf/ambari/blob/aef60264/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/configuration/zookeeper-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/configuration/zookeeper-log4j.xml b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/configuration/zookeeper-log4j.xml
index 76dff64..ff9138e 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/configuration/zookeeper-log4j.xml
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5/configuration/zookeeper-log4j.xml
@@ -30,7 +30,7 @@
     </value-attributes>
     <on-ambari-upgrade add="false"/>
   </property>
-<property>
+  <property>
     <name>zookeeper_log_number_of_backup_files</name>
     <value>10</value>
     <description>The number of backup files</description>


[8/8] ambari git commit: AMBARI-20443. No need to show (Masahiro Tanaka via alejandro)

Posted by rl...@apache.org.
AMBARI-20443. No need to show (Masahiro Tanaka via alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b3f7d9e4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b3f7d9e4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b3f7d9e4

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: b3f7d9e4211a3378c99538f606b27c30a33a34be
Parents: 6e2d321
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Tue May 2 11:43:07 2017 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Tue May 2 11:43:07 2017 -0700

----------------------------------------------------------------------
 .../common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml    | 3 ++-
 .../common-services/HIVE/2.1.0.3.0/configuration/hive-env.xml     | 1 +
 2 files changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b3f7d9e4/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
index caa598a..b2c364c 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
@@ -117,7 +117,8 @@
     <name>hive_ambari_database</name>
     <value>MySQL</value>
     <description>Database type.</description>
-    <on-ambari-upgrade add="true"/>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>hive_database_name</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b3f7d9e4/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-env.xml b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-env.xml
index 3cef34b..54a62e2 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-env.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/configuration/hive-env.xml
@@ -86,6 +86,7 @@
     <name>hive_ambari_database</name>
     <value>MySQL</value>
     <description>Database type.</description>
+    <deleted>true</deleted>
     <on-ambari-upgrade add="false"/>
   </property>
   <property>