Posted to commits@ambari.apache.org by sm...@apache.org on 2015/11/04 00:34:20 UTC

ambari git commit: AMBARI-13301. Kerberos support to ECS stack (Vijay Srinivasaraghavan via smohanty)

Repository: ambari
Updated Branches:
  refs/heads/trunk 463ed4bd9 -> b179d8de9


AMBARI-13301. Kerberos support to ECS stack (Vijay Srinivasaraghavan via smohanty)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b179d8de
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b179d8de
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b179d8de

Branch: refs/heads/trunk
Commit: b179d8de9551d8963f4903fc1736cde548050434
Parents: 463ed4b
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Tue Nov 3 15:33:57 2015 -0800
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Tue Nov 3 15:33:57 2015 -0800

----------------------------------------------------------------------
 ambari-server/sbin/ambari-server                |   6 +-
 ambari-server/src/main/python/ambari-server.py  |  27 ++-
 .../main/python/ambari_server/enableStack.py    |  94 ++++++++
 .../main/python/ambari_server/setupActions.py   |   1 +
 .../2.0.6/hooks/before-START/scripts/params.py  |  34 +++
 .../scripts/shared_initialization.py            |  21 ++
 .../stacks/HDP/2.3.ECS/repos/repoinfo.xml       |  30 ++-
 .../stacks/HDP/2.3.ECS/role_command_order.json  |   3 +-
 .../services/ECS/configuration/core-site.xml    | 197 +++++++++--------
 .../HDP/2.3.ECS/services/ECS/kerberos.json      |  53 +++++
 .../HDP/2.3.ECS/services/ECS/metainfo.xml       |   4 +-
 .../services/ECS/package/scripts/ecs_client.py  |   2 +
 .../services/ECS/package/scripts/params.py      |   7 +-
 .../ECS/package/scripts/service_check.py        |  19 +-
 .../HDP/2.3.ECS/services/HBASE/kerberos.json    | 132 ++++++++++++
 .../HDP/2.3.ECS/services/YARN/kerberos.json     | 215 +++++++++++++++++++
 16 files changed, 735 insertions(+), 110 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b179d8de/ambari-server/sbin/ambari-server
----------------------------------------------------------------------
diff --git a/ambari-server/sbin/ambari-server b/ambari-server/sbin/ambari-server
index 490d54d..c773718 100755
--- a/ambari-server/sbin/ambari-server
+++ b/ambari-server/sbin/ambari-server
@@ -148,9 +148,13 @@ case "$1" in
         echo -e "Updating host names"
         $PYTHON /usr/sbin/ambari-server.py $@
         ;;
+  enable-stack)
+        echo -e "Enabling stack(s)..."
+        $PYTHON /usr/sbin/ambari-server.py $@
+        ;;
   *)
         echo "Usage: /usr/sbin/ambari-server
-        {start|stop|restart|setup|setup-jce|upgrade|status|upgradestack|setup-ldap|sync-ldap|set-current|setup-security|refresh-stack-hash|backup|restore|update-host-names} [options]
+        {start|stop|restart|setup|setup-jce|upgrade|status|upgradestack|setup-ldap|sync-ldap|set-current|setup-security|refresh-stack-hash|backup|restore|update-host-names|enable-stack} [options]
        Use /usr/sbin/ambari-server <action> --help to get details on options available.
         Or, simply invoke ambari-server.py --help to print the options."
         exit 1
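
For reference, a typical invocation of the new subcommand would look like the following (the stack and version names are illustrative; --version may be repeated, since the option is declared with action="append" in ambari-server.py below). Every other version of the named stack is then marked inactive:

    ambari-server enable-stack --stack HDP --version 2.3.ECS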

http://git-wip-us.apache.org/repos/asf/ambari/blob/b179d8de/ambari-server/src/main/python/ambari-server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari-server.py b/ambari-server/src/main/python/ambari-server.py
index 56dfecc..3ea608c 100755
--- a/ambari-server/src/main/python/ambari-server.py
+++ b/ambari-server/src/main/python/ambari-server.py
@@ -37,10 +37,11 @@ from ambari_server.serverSetup import reset, setup, setup_jce_policy
 from ambari_server.serverUpgrade import upgrade, upgrade_stack, set_current
 from ambari_server.setupHttps import setup_https, setup_truststore
 from ambari_server.hostUpdate import update_host_names
+from ambari_server.enableStack import enable_stack_version
 
 from ambari_server.setupActions import BACKUP_ACTION, LDAP_SETUP_ACTION, LDAP_SYNC_ACTION, PSTART_ACTION, \
   REFRESH_STACK_HASH_ACTION, RESET_ACTION, RESTORE_ACTION, UPDATE_HOST_NAMES_ACTION, SETUP_ACTION, SETUP_SECURITY_ACTION, \
-  START_ACTION, STATUS_ACTION, STOP_ACTION, UPGRADE_ACTION, UPGRADE_STACK_ACTION, SETUP_JCE_ACTION, SET_CURRENT_ACTION
+  START_ACTION, STATUS_ACTION, STOP_ACTION, UPGRADE_ACTION, UPGRADE_STACK_ACTION, SETUP_JCE_ACTION, SET_CURRENT_ACTION, ENABLE_STACK_ACTION
 from ambari_server.setupSecurity import setup_ldap, sync_ldap, setup_master_key, setup_ambari_krb5_jaas
 from ambari_server.userInput import get_validated_string_input
 
@@ -373,6 +374,10 @@ def init_parser_options(parser):
   parser.add_option('--cluster-name', default=None, help="Cluster name", dest="cluster_name")
   parser.add_option('--version-display-name', default=None, help="Display name of desired repo version", dest="desired_repo_version")
   parser.add_option('--force-version', action="store_true", default=False, help="Force version to current", dest="force_repo_version")
+  parser.add_option('--version', dest="stack_versions", default=None, action="append", type="string",
+                    help="Specify a stack version that needs to be enabled (may be repeated). All other stack versions will be disabled")
+  parser.add_option('--stack', dest="stack_name", default=None, type="string",
+                    help="Specify the stack name for the stack versions that need to be enabled")
 
 @OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
 def are_cmd_line_db_args_blank(options):
@@ -523,7 +528,8 @@ def create_user_action_map(args, options):
         REFRESH_STACK_HASH_ACTION: UserAction(refresh_stack_hash_action),
         BACKUP_ACTION: UserActionPossibleArgs(backup, [1, 2], args),
         RESTORE_ACTION: UserActionPossibleArgs(restore, [1, 2], args),
-        UPDATE_HOST_NAMES_ACTION: UserActionPossibleArgs(update_host_names, [2], args, options)
+        UPDATE_HOST_NAMES_ACTION: UserActionPossibleArgs(update_host_names, [2], args, options),
+        ENABLE_STACK_ACTION: UserAction(enable_stack, options, args)
       }
   return action_map
 
@@ -630,6 +636,23 @@ def mainBody():
       print_error_msg("Unexpected {0}: {1}".format((e).__class__.__name__, str(e)) +\
       "\nFor more info run ambari-server with -v or --verbose option")
       sys.exit(1)     
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def enable_stack(options, args):
+  if options.stack_name is None:
+    print_error_msg("Please provide the stack name using the --stack option")
+    return -1
+  if options.stack_versions is None:
+    print_error_msg("Please provide the stack version using the --version option")
+    return -1
+  print_info_msg("Going to enable stack versions: " + str(options.stack_versions) + " for the stack: " + str(options.stack_name))
+  retcode = enable_stack_version(options.stack_name, options.stack_versions)
+  if retcode == 0:
+    # restart a running server so it picks up the updated stack definitions
+    status, pid = is_server_runing()
+    if status:
+      print "Restarting Ambari Server"
+      stop(options)
+      start(options)
       
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/ambari/blob/b179d8de/ambari-server/src/main/python/ambari_server/enableStack.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/enableStack.py b/ambari-server/src/main/python/ambari_server/enableStack.py
new file mode 100644
index 0000000..bf064bd
--- /dev/null
+++ b/ambari-server/src/main/python/ambari_server/enableStack.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+import re
+import fileinput
+
+from ambari_commons.exceptions import FatalException
+from ambari_commons.logging_utils import print_info_msg, print_warning_msg, print_error_msg, get_verbose
+from ambari_commons.os_utils import is_root
+from ambari_server.serverConfiguration import get_ambari_properties, get_stack_location
+from ambari_server.serverUtils import is_server_runing
+
+#
+# Stack enable/disable
+#
+
+def enable_stack_version(stack_name, stack_versions):
+  if not is_root():
+    err = 'Ambari-server enable-stack should be run with ' \
+          'root-level privileges'
+    raise FatalException(4, err)
+
+  try:
+    print_info_msg("Stack name requested: " + str(stack_name))
+    print_info_msg("Stack versions requested: " + str(stack_versions))
+  except IndexError:
+    raise FatalException(1, "Invalid stack version passed")
+
+  retcode = update_stack_metainfo(stack_name, stack_versions)
+
+  if retcode != 0:
+    raise FatalException(retcode, 'Stack enable request failed.')
+
+  return retcode
+
+def update_stack_metainfo(stack_name, stack_versions):
+  properties = get_ambari_properties()
+  if properties == -1:
+    print_error_msg("Error getting ambari properties")
+    return -1
+
+  stack_location = get_stack_location(properties)
+  print_info_msg("Stack location: " + stack_location)
+
+  stack_root = os.path.join(stack_location, stack_name)
+  print_info_msg("Stack root: " + stack_root)
+  if not os.path.exists(stack_root):
+    print_error_msg("Stack directory does not exist: " + stack_root)
+    return -1
+
+  for stack in stack_versions:
+    if stack not in os.listdir(stack_root):
+      print_error_msg("The requested stack version: " + stack + " is not available in the " + stack_name + " stack")
+      return -1
+
+  # mark the requested versions active and every other version inactive
+  for directory in os.listdir(stack_root):
+    print_info_msg("Directory found: " + directory)
+    metainfo_file = os.path.join(stack_root, directory, "metainfo.xml")
+    print_info_msg("Looking for metainfo file: " + metainfo_file)
+    if not os.path.exists(metainfo_file):
+      print_error_msg("Could not find metainfo file in the path " + metainfo_file)
+      continue
+    if directory in stack_versions:
+      print_info_msg("Updating stack to active for: " + directory)
+      replace(metainfo_file, "<active>false</active>", "<active>true</active>")
+    else:
+      print_info_msg("Updating stack to inactive for: " + directory)
+      replace(metainfo_file, "<active>true</active>", "<active>false</active>")
+  return 0
+
+def replace(file_path, pattern, subst):
+  # fileinput with inplace=1 redirects stdout into the file, so print() rewrites each line
+  for line in fileinput.input(file_path, inplace=1):
+    print(re.sub(pattern, subst, line.rstrip()))
+
+
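
The replace() helper leans on fileinput's in-place editing mode, which temporarily redirects stdout into the file being read so that print() rewrites each line. A minimal standalone sketch of the same idiom against a throwaway metainfo.xml (the file content below is hypothetical, not the full stack metainfo schema):

    import fileinput
    import os
    import re
    import tempfile

    # write a throwaway metainfo.xml carrying an inactive version flag
    path = os.path.join(tempfile.mkdtemp(), "metainfo.xml")
    with open(path, "w") as f:
        f.write("<metainfo>\n  <active>false</active>\n</metainfo>\n")

    # inplace=1 redirects stdout into the file, so print() rewrites each line
    for line in fileinput.input(path, inplace=1):
        print(re.sub("<active>false</active>", "<active>true</active>", line.rstrip()))
    fileinput.close()

    print(open(path).read())  # the version is now marked <active>true</active>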

http://git-wip-us.apache.org/repos/asf/ambari/blob/b179d8de/ambari-server/src/main/python/ambari_server/setupActions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/setupActions.py b/ambari-server/src/main/python/ambari_server/setupActions.py
index 686ac9a..ec2d788 100644
--- a/ambari-server/src/main/python/ambari_server/setupActions.py
+++ b/ambari-server/src/main/python/ambari_server/setupActions.py
@@ -39,3 +39,4 @@ UPDATE_HOST_NAMES_ACTION = "update-host-names"
 BACKUP_ACTION = "backup"
 RESTORE_ACTION = "restore"
 SETUP_JCE_ACTION = "setup-jce"
+ENABLE_STACK_ACTION = "enable-stack"

http://git-wip-us.apache.org/repos/asf/ambari/blob/b179d8de/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index bbdfc63..23c7489 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -27,6 +27,8 @@ from resource_management.libraries.functions import format
 from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
 from ambari_commons.os_check import OSCheck
 from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
 
 config = Script.get_config()
 
@@ -34,6 +36,7 @@ stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 
 dfs_type = default("/commandParams/dfs_type", "")
+hadoop_conf_dir = "/etc/hadoop/conf"
 
 # hadoop default params
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
@@ -206,3 +209,34 @@ net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
 net_topology_script_dir = os.path.dirname(net_topology_script_file_path)
 net_topology_mapping_data_file_name = 'topology_mappings.data'
 net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
+
+# Logic to create the /tmp and /user/<smokeuser> directories for HCFS stacks.
+has_core_site = 'core-site' in config['configurations']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+kinit_path_local = get_kinit_path()
+hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+smoke_user = config['configurations']['cluster-env']['smokeuser']
+smoke_hdfs_user_dir = format("/user/{smoke_user}")
+smoke_hdfs_user_mode = 0770
+
+import functools
+# create a partial function with the common arguments pre-bound for every HdfsResource call;
+# code that creates/deletes/copies HDFS directories and files then calls params.HdfsResource
+# with only the per-call arguments
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  dfs_type = dfs_type
+)
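
The functools.partial call above binds the cluster-wide keyword arguments once, so hook code only supplies the per-path details. A self-contained sketch of the same pattern (hdfs_resource below is a toy stand-in, not the real HdfsResource class):

    import functools

    def hdfs_resource(path, user=None, keytab=None, action=None):
        # stand-in that just reports what the real resource would do
        print("%s %s as user=%s (keytab=%s)" % (action, path, user, keytab))

    # bind the arguments shared by every call once...
    HdfsResource = functools.partial(hdfs_resource,
                                     user="hdfs",
                                     keytab="/etc/security/keytabs/hdfs.headless.keytab")

    # ...so call sites pass only what varies
    HdfsResource("/tmp", action="create_on_execute")
    HdfsResource("/user/ambari-qa", action="create_on_execute")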

http://git-wip-us.apache.org/repos/asf/ambari/blob/b179d8de/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
index e1ed8bf..1ddd4b4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
@@ -101,6 +101,9 @@ def setup_hadoop():
            content=Template("hadoop-metrics2.properties.j2")
       )
 
+    if params.dfs_type == 'HCFS' and params.has_core_site:
+      create_dirs()
+
 
 def setup_configs():
   """
@@ -149,3 +152,21 @@ def create_javahome_symlink():
          to="/usr/jdk64/jdk1.6.0_31",
     )
 
+def create_dirs():
+  import params
+  # create /tmp and the smoke user's home directory on the HCFS file system
+  params.HdfsResource("/tmp",
+                      type="directory",
+                      action="create_on_execute",
+                      owner=params.hdfs_user,
+                      mode=0777
+  )
+  params.HdfsResource(params.smoke_hdfs_user_dir,
+                      type="directory",
+                      action="create_on_execute",
+                      owner=params.smoke_user,
+                      mode=params.smoke_hdfs_user_mode
+  )
+  # a final "execute" action runs the queued create_on_execute operations
+  params.HdfsResource(None,
+                      action="execute"
+  )
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/b179d8de/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/repos/repoinfo.xml
index 80f95e1..b44cca5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/repos/repoinfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/repos/repoinfo.xml
@@ -28,6 +28,11 @@
       <repoid>HDP-UTILS-1.1.0.20</repoid>
       <reponame>HDP-UTILS</reponame>
     </repo>
+    <repo>
+      <baseurl>http://ECS_CLIENT_REPO/</baseurl>
+      <repoid>ECS-2.2.0.0</repoid>
+      <reponame>ECS</reponame>
+    </repo>
   </os>
   <os family="redhat7">
     <repo>
@@ -40,6 +45,11 @@
       <repoid>HDP-UTILS-1.1.0.20</repoid>
       <reponame>HDP-UTILS</reponame>
     </repo>
+    <repo>
+      <baseurl>http://ECS_CLIENT_REPO/</baseurl>
+      <repoid>ECS-2.2.0.0</repoid>
+      <reponame>ECS</reponame>
+    </repo>
   </os>
   <os family="suse11">
     <repo>
@@ -52,6 +62,11 @@
       <repoid>HDP-UTILS-1.1.0.20</repoid>
       <reponame>HDP-UTILS</reponame>
     </repo>
+    <repo>
+      <baseurl>http://ECS_CLIENT_REPO/</baseurl>
+      <repoid>ECS-2.2.0.0</repoid>
+      <reponame>ECS</reponame>
+    </repo>
   </os>
   <os family="ubuntu12">
     <repo>
@@ -64,6 +79,11 @@
       <repoid>HDP-UTILS-1.1.0.20</repoid>
       <reponame>HDP-UTILS</reponame>
     </repo>
+    <repo>
+      <baseurl>http://ECS_CLIENT_REPO/</baseurl>
+      <repoid>ECS-2.2.0.0</repoid>
+      <reponame>ECS</reponame>
+    </repo>
   </os>
   <os family="debian7">
     <repo>
@@ -76,6 +96,11 @@
       <repoid>HDP-UTILS-1.1.0.20</repoid>
       <reponame>HDP-UTILS</reponame>
     </repo>
+    <repo>
+      <baseurl>http://ECS_CLIENT_REPO/</baseurl>
+      <repoid>ECS-2.2.0.0</repoid>
+      <reponame>ECS</reponame>
+    </repo>
   </os>
   <os family="ubuntu14">
     <repo>
@@ -88,13 +113,10 @@
       <repoid>HDP-UTILS-1.1.0.20</repoid>
       <reponame>HDP-UTILS</reponame>
     </repo>
-  </os>
-  <!-- ECS CLIENT REPO -->
-  <os family="any">
     <repo>
       <baseurl>http://ECS_CLIENT_REPO/</baseurl>
       <repoid>ECS-2.2.0.0</repoid>
       <reponame>ECS</reponame>
     </repo>
-  </os>  
+  </os>
 </reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b179d8de/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/role_command_order.json
index 91c267b..08cb729 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/role_command_order.json
@@ -3,7 +3,8 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
-    "ECS-SERVICE_CHECK": ["ECS-INSTALL"]
+    "ECS-SERVICE_CHECK": ["ECS-INSTALL"],
+    "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
   }
 }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b179d8de/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/core-site.xml
index bdfe8dc..8f6a113 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/configuration/core-site.xml
@@ -20,95 +20,110 @@
  
 <configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
 
-<!-- file system properties -->
-
-  <property>
-  <name>fs.viprfs.impl</name>
-  <value>com.emc.hadoop.fs.vipr.ViPRFileSystem</value>
-  </property>
-
-  <property>
-  <name>fs.AbstractFileSystem.viprfs.impl</name>
-  <value>com.emc.hadoop.fs.vipr.ViPRAbstractFileSystem</value>
-  </property>
-
-  <property>
-    <name>fs.vipr.installations</name>
-    <value>Site1</value>
-  </property>  
-
-  <property>
-    <name>fs.vipr.installation.Site1.hosts</name>
-    <value></value>
-  </property>  
-
-  <property>
-    <name>fs.vipr.installation.Site1.hosts.resolution</name>
-    <value>dynamic</value>
-  </property>  
-
-  <property>
-    <name>fs.vipr.installation.Site1.resolution.dynamic.time_to_live_ms</name>
-    <value>900000</value>
-  </property>  
- 
-  <property>
-    <name>fs.permissions.umask-mode</name>
-    <value>022</value>
-  </property>  
-
-  <property>
-    <name>fs.viprfs.auth.anonymous_translation</name>
-    <value>LOCAL_USER</value>
-  </property>  
-
-  <property>
-    <name>fs.viprfs.auth.identity_translation</name>
-    <value>NONE</value>
-  </property>  
-
-  <property>
-    <name>fs.defaultFS</name>
-    <value></value>
-  </property>  
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-  </property>
-
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-    <description> A list of comma-delimited serialization classes that can be used for obtaining serializers and deserializers.
-    </description>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>A list of the compression codec classes that can be used for compression/decompression.</description>
-  </property>
-
-  <property>
-    <name>hadoop.security.authentication</name>
-    <value>simple</value>
-    <description>
-      Set the authentication for the cluster. Valid values are: simple or kerberos
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.security.authorization</name>
-    <value>false</value>
-    <description>
-        Enable authorization for different protocols.
-    </description>
-  </property>
-
-
+    <!-- HDFS Configurations -->
+
+    <property>
+      <name>fs.defaultFS</name>
+      <value></value>
+      <description>Provide the VIPRFS bucket details using the format viprfs://$BUCKET_NAME.$NAMESPACE.$SITE_NAME_from_fs.vipr.installations</description>
+    </property>
+
+    <property>
+      <name>hadoop.security.authentication</name>
+      <value>simple</value>
+      <description>Supported values: simple, kerberos</description>
+    </property>
+
+    <property>
+      <name>hadoop.security.authorization</name>
+      <value>false</value>
+      <description>Supported values: true, false</description>
+    </property>
+
+    <property>
+      <name>hadoop.security.auth_to_local</name>
+      <value>DEFAULT</value>
+    </property>
+
+    <property>
+      <name>fs.permissions.umask-mode</name>
+      <value>022</value>
+    </property>
+
+    <!-- VIPRFS Configurations -->
+
+    <property>
+      <name>fs.vipr.installations</name>
+      <value>Site1</value>
+      <description>Provide the site name of the tenant</description>
+    </property>
+
+    <property>
+      <name>fs.vipr.installation.Site1.hosts</name>
+      <value></value>
+      <description>Provide the ECS node IPs or VIP</description>
+    </property>
+
+    <property>
+      <name>fs.vipr.installation.Site1.resolution</name>
+      <value>dynamic</value>
+    </property>
+
+    <property>
+      <name>fs.vipr.installation.Site1.resolution.dynamic.time_to_live_ms</name>
+      <value>900000</value>
+    </property>
+
+    <property>
+      <name>fs.viprfs.auth.anonymous_translation</name>
+      <value>LOCAL_USER</value>
+      <final>true</final>
+      <description>Supported value: LOCAL_USER. Applicable only to insecure cluster deployments.</description>
+    </property>
+
+    <property>
+      <name>fs.viprfs.auth.identity_translation</name>
+      <value>NONE</value>
+      <description>Supported values: NONE (default), FIXED_REALM, and CURRENT_USER_REALM</description>
+    </property>
+
+    <!-- Moved to kerberos.json, as this applies only to secure clusters
+    <property>
+      <name>viprfs.security.principal</name>
+      <value>NONE</value>
+      <description>Modify the value for a secure cluster setup. Provide the object engine security principal name using the format: vipr/_HOST@ECS_REALM</description>
+    </property>
+    -->
+
+    <property>
+      <name>fs.viprfs.impl</name>
+      <value>com.emc.hadoop.fs.vipr.ViPRFileSystem</value>
+      <final>true</final>
+    </property>
+
+    <property>
+      <name>fs.AbstractFileSystem.viprfs.impl</name>
+      <value>com.emc.hadoop.fs.vipr.ViPRAbstractFileSystem</value>
+      <final>true</final>
+    </property>
+
+    <property>
+      <name>fs.trace.viprfs.dfs.impl</name>
+      <value>com.emc.hadoop.fs.trace.TraceDistributedFileSystem</value>
+      <final>true</final>
+    </property>
+
+    <property>
+      <name>fs.trace.viprfs.dfs.inner</name>
+      <value>org.apache.hadoop.hdfs.DistributedFileSystemShim</value>
+      <final>true</final>
+    </property>
+
+    <property>
+      <name>fs.viprfs.dfs.impl</name>
+      <value>org.apache.hadoop.hdfs.DistributedFileSystemShim</value>
+      <final>true</final>
+    </property>
+
 </configuration>
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/b179d8de/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/kerberos.json
new file mode 100644
index 0000000..213c964
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/kerberos.json
@@ -0,0 +1,53 @@
+{
+  "services": [
+    {
+      "name": "ECS",
+      "identities": [
+        {
+          "name": "/smokeuser"
+        },
+        {
+          "name": "hdfs",
+          "principal": {
+            "value": "${hadoop-env/hdfs_user}-${cluster_name}@${realm}",
+            "type" : "user" ,
+            "configuration": "hadoop-env/hdfs_principal_name",
+            "local_username" : "${hadoop-env/hdfs_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/hdfs.headless.keytab",
+            "owner": {
+              "name": "${hadoop-env/hdfs_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": "r"
+            },
+            "configuration": "hadoop-env/hdfs_user_keytab"
+          }
+        }
+      ],
+      "auth_to_local_properties" : [
+        "core-site/hadoop.security.auth_to_local"
+      ],
+      "configurations": [
+        {
+          "core-site": {
+            "hadoop.security.authentication": "kerberos",
+            "hadoop.security.authorization": "true",
+            "fs.viprfs.auth.identity_translation": "CURRENT_USER_REALM",
+            "viprfs.security.principal": "",
+            "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "ECS_CLIENT"
+        }
+      ]
+    }
+  ]
+}
+
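
The ${...} tokens in the descriptor are resolved by Ambari against cluster configuration: ${hadoop-env/hdfs_user} reads a property from the hadoop-env configuration type, while ${cluster_name} and ${realm} come from the cluster being kerberized. A toy resolver illustrating the substitution (not Ambari's actual implementation):

    import re

    configs = {
        "hadoop-env/hdfs_user": "hdfs",
        "cluster_name": "c1",
        "realm": "EXAMPLE.COM",
    }

    def resolve(template):
        # swap each ${key} for its configured value
        return re.sub(r"\$\{([^}]+)\}", lambda m: configs[m.group(1)], template)

    print(resolve("${hadoop-env/hdfs_user}-${cluster_name}@${realm}"))  # hdfs-c1@EXAMPLE.COM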

http://git-wip-us.apache.org/repos/asf/ambari/blob/b179d8de/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/metainfo.xml
index 1a3b742..3f6643b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/metainfo.xml
@@ -34,7 +34,7 @@
             <scriptType>PYTHON</scriptType>
             <timeout>600</timeout>
           </commandScript>
-            <configFile>
+          <configFile>
               <type>xml</type>
               <fileName>core-site.xml</fileName>
               <dictionaryName>core-site</dictionaryName>
@@ -48,7 +48,7 @@
               <type>env</type>
               <fileName>hadoop-env.sh</fileName>
               <dictionaryName>hadoop-env</dictionaryName>
-            </configFile>          
+          </configFile>          
         </component>
       </components>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b179d8de/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py
index b9f9180..9c55b42 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py
@@ -29,6 +29,8 @@ class ECSClient(Script):
   def configure(self, env):
     self.setup_config(env)
     self.setup_hadoop_env(env)
+
+  def createdirs(self, env):
     self.create_dirs(env)
 
   def status(self, env):

http://git-wip-us.apache.org/repos/asf/ambari/blob/b179d8de/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py
index 8fe8efb..f2819aa 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py
@@ -24,6 +24,7 @@ import itertools
 import re
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import hdp_select
 
 config = Script.get_config()
 
@@ -38,11 +39,7 @@ kinit_path_local = functions.get_kinit_path()
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
-
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-   hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
-else:
-   hadoop_bin_dir = "/usr/bin"
+hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
 
 smoke_user =  config['configurations']['cluster-env']['smokeuser']
 smoke_hdfs_user_dir = format("/user/{smoke_user}")

http://git-wip-us.apache.org/repos/asf/ambari/blob/b179d8de/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/service_check.py
index 76d4252..cc893ec 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/service_check.py
@@ -26,11 +26,22 @@ class ECSServiceCheck(Script):
     env.set_params(params)
 
     # run fs list command to make sure ECS client can talk to ECS backend
-    Execute(format("hadoop fs -ls /"),
-              logoutput=True,
-              tries = 3,
-              try_sleep = 20
+    list_command = "fs -ls /"
+
+    if params.security_enabled:
+      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
+        user=params.hdfs_user
+      )
+
+    ExecuteHadoop(list_command,
+                  user=params.hdfs_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=20,
+                  bin_dir=params.hadoop_bin_dir
     )
 
+
 if __name__ == "__main__":
   ECSServiceCheck().execute()
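
On a secured cluster the service check now acquires a Kerberos ticket before running the listing; with representative values (the kinit path, keytab, and principal below are illustrative) the kinit line expands to:

    /usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs-c1@EXAMPLE.COM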

http://git-wip-us.apache.org/repos/asf/ambari/blob/b179d8de/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/kerberos.json
new file mode 100644
index 0000000..1db82a3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/HBASE/kerberos.json
@@ -0,0 +1,132 @@
+{
+  "services": [
+    {
+      "name": "HBASE",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/ECS/hdfs"
+        },
+        {
+          "name": "hbase",
+          "principal": {
+            "value": "${hbase-env/hbase_user}-${cluster_name}@${realm}",
+            "type" : "user",
+            "configuration": "hbase-env/hbase_principal_name",
+            "local_username": "${hbase-env/hbase_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/hbase.headless.keytab",
+            "owner": {
+              "name": "${hbase-env/hbase_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": "r"
+            },
+            "configuration": "hbase-env/hbase_user_keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "hbase-site": {
+            "hbase.security.authentication": "kerberos",
+            "hbase.security.authorization": "true",
+            "zookeeper.znode.parent": "/hbase-secure",
+              "hbase.coprocessor.master.classes": "{{hbase_coprocessor_master_classes}}",
+              "hbase.coprocessor.region.classes": "{{hbase_coprocessor_region_classes}}",
+            "hbase.bulkload.staging.dir": "/apps/hbase/staging"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "HBASE_MASTER",
+          "identities": [
+            {
+              "name": "hbase_master_hbase",
+              "principal": {
+                "value": "hbase/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hbase-site/hbase.master.kerberos.principal",
+                "local_username": "${hbase-env/hbase_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hbase.service.keytab",
+                "owner": {
+                  "name": "${hbase-env/hbase_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hbase-site/hbase.master.keytab.file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HBASE_REGIONSERVER",
+          "identities": [
+            {
+              "name": "hbase_regionserver_hbase",
+              "principal": {
+                "value": "hbase/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hbase-site/hbase.regionserver.kerberos.principal",
+                "local_username": "${hbase-env/hbase_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hbase.service.keytab",
+                "owner": {
+                  "name": "${hbase-env/hbase_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hbase-site/hbase.regionserver.keytab.file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "PHOENIX_QUERY_SERVER",
+          "identities": [
+            {
+              "name": "hbase_queryserver_hbase",
+              "principal": {
+                "value": "hbase/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hbase-site/phoenix.queryserver.kerberos.principal",
+                "local_username": "${hbase-env/hbase_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hbase.service.keytab",
+                "owner": {
+                  "name": "${hbase-env/hbase_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hbase-site/phoenix.queryserver.keytab.file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
+
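
Note that "/ECS/hdfs" here (and in the YARN descriptor below) is an identity reference: it pulls in the hdfs identity declared in the ECS service's kerberos.json above, just as "/spnego" and "/smokeuser" reference stack-level identities, so the headless hdfs principal and keytab are defined once and reused.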

http://git-wip-us.apache.org/repos/asf/ambari/blob/b179d8de/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
new file mode 100644
index 0000000..9606b59
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/YARN/kerberos.json
@@ -0,0 +1,215 @@
+{
+  "services": [
+    {
+      "name": "YARN",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/ECS/hdfs"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "yarn-site": {
+            "yarn.timeline-service.enabled": "false",
+            "yarn.timeline-service.http-authentication.type": "kerberos",
+            "yarn.acl.enable": "true",
+            "yarn.timeline-service.http-authentication.signature.secret": "",
+            "yarn.timeline-service.http-authentication.signature.secret.file": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
+            "yarn.timeline-service.http-authentication.token.validity": "",
+            "yarn.timeline-service.http-authentication.cookie.domain": "",
+            "yarn.timeline-service.http-authentication.cookie.path": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
+            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": ""
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.yarn.groups": "*",
+            "hadoop.proxyuser.yarn.hosts": "${yarn-site/yarn.resourcemanager.hostname}"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "NODEMANAGER",
+          "identities": [
+            {
+              "name": "nodemanager_nm",
+              "principal": {
+                "value": "nm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.nodemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.nodemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "yarn-site": {
+                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
+              }
+            }
+          ]
+        },
+        {
+          "name": "RESOURCEMANAGER",
+          "identities": [
+            {
+              "name": "resource_manager_rm",
+              "principal": {
+                "value": "rm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.resourcemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/rm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.resourcemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "APP_TIMELINE_SERVER",
+          "identities": [
+            {
+              "name": "app_timeline_server_yarn",
+              "principal": {
+                "value": "yarn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.timeline-service.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/yarn.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.timeline-service.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "name": "MAPREDUCE2",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/ECS/hdfs"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "HISTORYSERVER",
+          "identities": [
+            {
+              "name": "history_server_jhs",
+              "principal": {
+                "value": "jhs/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "mapred-site/mapreduce.jobhistory.principal",
+                "local_username": "${mapred-env/mapred_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jhs.service.keytab",
+                "owner": {
+                  "name": "${mapred-env/mapred_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
+