Posted to commits@ambari.apache.org by al...@apache.org on 2017/06/28 00:24:19 UTC

[21/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_server_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_server_upgrade.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_server_upgrade.py
new file mode 100755
index 0000000..aa02f64
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_server_upgrade.py
@@ -0,0 +1,300 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import glob
+import os
+import shutil
+import tarfile
+import tempfile
+
+from resource_management.core import shell
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Execute, Directory
+from resource_management.libraries.functions import Direction
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import compare_versions
+from resource_management.libraries.functions import format_stack_version
+from resource_management.libraries.functions import tar_archive
+from resource_management.libraries.functions import stack_select
+import oozie
+
+BACKUP_TEMP_DIR = "oozie-upgrade-backup"
+BACKUP_CONF_ARCHIVE = "oozie-conf-backup.tar"
+
+
+def backup_configuration():
+  """
+  Backs up the oozie configuration as part of the upgrade process.
+  :return:
+  """
+  Logger.info('Backing up Oozie configuration directory before upgrade...')
+  directoryMappings = _get_directory_mappings()
+
+  absolute_backup_dir = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR)
+  if not os.path.isdir(absolute_backup_dir):
+    os.makedirs(absolute_backup_dir)
+
+  for directory in directoryMappings:
+    if not os.path.isdir(directory):
+      raise Fail("Unable to backup missing directory {0}".format(directory))
+
+    archive = os.path.join(absolute_backup_dir, directoryMappings[directory])
+    Logger.info('Compressing {0} to {1}'.format(directory, archive))
+
+    if os.path.exists(archive):
+      os.remove(archive)
+
+    # backup the directory, following symlinks instead of including them
+    tar_archive.archive_directory_dereference(archive, directory)
+
+
+def restore_configuration():
+  """
+  Restores the configuration backups to their proper locations after an
+  upgrade has completed.
+  :return:
+  """
+  Logger.info('Restoring Oozie configuration directory after upgrade...')
+  directoryMappings = _get_directory_mappings()
+
+  for directory in directoryMappings:
+    archive = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR,
+      directoryMappings[directory])
+
+    if not os.path.isfile(archive):
+      raise Fail("Unable to restore missing backup archive {0}".format(archive))
+
+    Logger.info('Extracting {0} to {1}'.format(archive, directory))
+
+    tar_archive.untar_archive(archive, directory)
+
+  # cleanup
+  Directory(os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR),
+            action="delete"
+  )
+
+
+def prepare_libext_directory():
+  """
+  Creates the customer libext directory if needed, ensures it is
+  world-accessible (0777), and populates it with the hadoop-lzo JARs
+  (when applicable) and the ext-2.2.zip library for the Oozie web console.
+  :return:
+  """
+  import params
+
+  # some versions of IOP don't need the lzo compression libraries
+  target_version_needs_compression_libraries = compare_versions(
+    format_stack_version(params.version), '4.0.0.0') >= 0
+
+  if not os.path.isdir(params.oozie_libext_customer_dir):
+    os.makedirs(params.oozie_libext_customer_dir, 0o777)
+
+  # ensure that it's rwx for all
+  os.chmod(params.oozie_libext_customer_dir, 0o777)
+
+  # get all hadoop-lzo* JAR files
+  # stack-select set hadoop-client has not run yet, therefore we cannot use
+  # /usr/iop/current/hadoop-client ; we must use params.version directly
+  # however, this only works when upgrading beyond 4.0.0.0; don't do this
+  # for a downgrade to 4.0.0.0 since hadoop-lzo will not be present
+  # This can also be called during a downgrade; when a version is installed,
+  # it is responsible for downloading the hadoop-lzo packages if lzo is enabled.
+  if params.lzo_enabled and (params.upgrade_direction == Direction.UPGRADE or target_version_needs_compression_libraries):
+    hadoop_lzo_pattern = 'hadoop-lzo*.jar'
+    hadoop_client_new_lib_dir = format("/usr/iop/{version}/hadoop/lib")
+
+    # use glob.glob (not iglob): a generator is always truthy, so the
+    # emptiness check below would never fire with iglob
+    files = glob.glob(os.path.join(hadoop_client_new_lib_dir, hadoop_lzo_pattern))
+    if not files:
+      raise Fail("There are no files at {0} matching {1}".format(
+        hadoop_client_new_lib_dir, hadoop_lzo_pattern))
+
+    # copy files into libext
+    files_copied = False
+    for jar_file in files:
+      if os.path.isfile(jar_file):
+        Logger.info("Copying {0} to {1}".format(jar_file, params.oozie_libext_customer_dir))
+        shutil.copy2(jar_file, params.oozie_libext_customer_dir)
+        files_copied = True
+
+    if not files_copied:
+      raise Fail("There are no files at {0} matching {1}".format(
+        hadoop_client_new_lib_dir, hadoop_lzo_pattern))
+
+  # copy ext ZIP to customer dir
+  oozie_ext_zip_file = '/usr/share/IOP-oozie/ext-2.2.zip'
+  if not os.path.isfile(oozie_ext_zip_file):
+    raise Fail("Unable to copy {0} because it does not exist".format(oozie_ext_zip_file))
+
+  Logger.info("Copying {0} to {1}".format(oozie_ext_zip_file, params.oozie_libext_customer_dir))
+  shutil.copy2(oozie_ext_zip_file, params.oozie_libext_customer_dir)
+
+def upgrade_oozie_database_and_sharelib():
+  """
+  Performs the creation and upload of the sharelib and the upgrade of the
+  database. This method will also perform a kinit if necessary.
+  It is run before the upgrade of oozie begins exactly once as part of the
+  upgrade orchestration.
+
+  Since this runs before the upgrade has occurred, it should not use any
+  "current" directories since they will still be pointing to the older
+  version of Oozie. Instead, it should use versioned directories to ensure
+  that the commands running are from the oozie version about to be upgraded to.
+  :return:
+  """
+  import params
+  # get the kerberos token if necessary to execute commands as oozie
+  if params.security_enabled:
+    oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
+    command = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host}")
+    Execute(command, user=params.oozie_user)
+
+  upgrade_stack = stack_select._get_upgrade_stack()
+  if upgrade_stack is None or len(upgrade_stack) < 2 or upgrade_stack[1] is None:
+    raise Fail("Unable to determine the stack that is being upgraded to or downgraded to.")
+  stack_version = upgrade_stack[1]
+
+  # upgrade oozie DB
+  Logger.info('Upgrading the Oozie database...')
+  oozie.download_database_library_if_needed()
+  database_upgrade_command = "/usr/iop/{0}/oozie/bin/ooziedb.sh upgrade -run".format(stack_version)
+  Execute(database_upgrade_command, user=params.oozie_user, logoutput=True)
+  create_sharelib()
+
+def create_sharelib():
+  """
+  Performs the creation and upload of the sharelib.
+  This method will also perform a kinit if necessary.
+  It is run before the upgrade of oozie begins exactly once as part of the
+  upgrade orchestration.
+
+  Since this runs before the upgrade has occurred, it should not use any
+  "current" directories since they will still be pointing to the older
+  version of Oozie. Instead, it should use versioned directories to ensure
+  that the commands running are from the oozie version about to be upgraded to.
+  """
+  import params
+  Logger.info('Creating a new sharelib and uploading it to HDFS...')
+  # ensure the oozie directory exists for the sharelib
+  params.HdfsResource(format("{oozie_hdfs_user_dir}/share"),
+    action = "create_on_execute",
+    type = "directory",
+    owner = "oozie",
+    group = "hadoop",
+    mode = 0755,
+    recursive_chmod = True)
+
+  params.HdfsResource(None, action = "execute")
+
+  upgrade_stack = stack_select._get_upgrade_stack()
+  if upgrade_stack is None or upgrade_stack[1] is None:
+    raise Fail("Unable to determine the stack that is being upgraded to or downgraded to.")
+
+  stack_version = upgrade_stack[1]
+
+  # install new sharelib to HDFS
+  sharelib_command = "/usr/iop/{0}/oozie/bin/oozie-setup.sh sharelib create -fs {1}".format(
+    stack_version, params.fs_root)
+  Execute(sharelib_command, user=params.oozie_user, logoutput=True)
+
+
+def upgrade_oozie():
+  """
+  Performs the upgrade of the oozie WAR file and database.
+  :return:
+  """
+  import params
+
+  # get the kerberos token if necessary to execute commands as oozie
+  if params.security_enabled:
+    oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
+    command = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host}")
+    Execute(command, user=params.oozie_user)
+
+  # ensure that HDFS is prepared to receive the new sharelib
+  command = format("hdfs dfs -chown {params.oozie_user}:{params.user_group} {oozie_hdfs_user_dir}/share")
+  Execute(command, user=params.oozie_user)
+
+  command = format("hdfs dfs -chmod -R 755 {oozie_hdfs_user_dir}/share")
+  Execute(command, user=params.oozie_user)
+
+  # upgrade oozie DB
+  command = format("{oozie_home}/bin/ooziedb.sh upgrade -run")
+  Execute(command, user=params.oozie_user)
+
+  # prepare the oozie WAR
+  '''command = format("{oozie_setup_sh} prepare-war -d {oozie_libext_customer_dir}")
+  return_code, oozie_output = shell.call(command)
+
+  if return_code != 0 or "New Oozie WAR file with added" not in oozie_output:
+    message = "Unexpected Oozie WAR preparation output {0}".format(oozie_output)
+    Logger.error(message)
+    raise Fail(message)'''
+
+  # install new sharelib to HDFS
+  command = format("{oozie_setup_sh} sharelib create -fs {fs_root}")
+  Execute(command, user=params.oozie_user)
+
+
+def _get_directory_mappings():
+  """
+  Gets a dictionary of directory to archive name that represents the
+  directories that need to be backed up and their output tarball archive targets
+  :return:  the dictionary of directory to tarball mappings
+  """
+  import params
+
+  return { params.conf_dir : BACKUP_CONF_ARCHIVE }
+
+def prepare_warfile():
+  """
+  Invokes the 'prepare-war' command in Oozie in order to create the WAR.
+  The prepare-war command uses the input WAR from ${OOZIE_HOME}/oozie.war and
+  outputs the prepared WAR to ${CATALINA_BASE}/webapps/oozie.war - because of this,
+  both of these environment variables must point to the upgraded oozie-server path
+  and not to oozie-client, which has not yet been updated.
+
+  This method will also perform a kinit if necessary.
+  :return:
+  """
+  import params
+
+  # get the kerberos token if necessary to execute commands as oozie
+  if params.security_enabled:
+    oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
+    command = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host}")
+    Execute(command, user=params.oozie_user, logoutput=True)
+
+  # setup environment
+  environment = { "CATALINA_BASE" : "/usr/iop/current/oozie-server/oozie-server",
+    "OOZIE_HOME" : "/usr/iop/current/oozie-server" }
+
+  # prepare the oozie WAR
+  command = format("{oozie_setup_sh} prepare-war")
+  return_code, oozie_output = shell.call(command, user=params.oozie_user,
+    logoutput=True, quiet=False, env=environment)
+
+  # set it to "" in to prevent a possible iteration issue
+  if oozie_output is None:
+    oozie_output = ""
+
+  if return_code != 0 or "New Oozie WAR file with added".lower() not in oozie_output.lower():
+    message = "Unexpected Oozie WAR preparation output {0}".format(oozie_output)
+    Logger.error(message)
+    raise Fail(message)
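
The backup/restore pair above reduces to a dereferencing tar round trip. A minimal
standalone sketch of that behavior, assuming tar_archive.archive_directory_dereference
and tar_archive.untar_archive act like Python's tarfile with dereference=True (the
function bodies and paths here are illustrative, not Ambari's implementation):

    import os
    import tarfile
    import tempfile

    def archive_directory_dereference(archive, directory):
        # follow symlinks so the backup contains real files, not links
        with tarfile.open(archive, "w", dereference=True) as tar:
            tar.add(directory, arcname=os.path.basename(directory))

    def untar_archive(archive, destination):
        # unpack the backup into the destination directory
        with tarfile.open(archive) as tar:
            tar.extractall(destination)

    backup_dir = os.path.join(tempfile.gettempdir(), "oozie-upgrade-backup")
    if not os.path.isdir(backup_dir):
        os.makedirs(backup_dir)
    archive_directory_dereference(os.path.join(backup_dir, "oozie-conf-backup.tar"),
                                  "/etc/oozie/conf")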

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_service.py
new file mode 100755
index 0000000..485686f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_service.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import time
+from resource_management import *
+from resource_management.core.shell import as_user
+from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
+from ambari_commons import OSConst
+
+def oozie_service(action = 'start', upgrade_type=None):
+  """
+  Starts or stops the Oozie service.
+  :param action: 'start' or 'stop'
+  :param upgrade_type: the type of upgrade, either "rolling" or "non_rolling";
+  when set, some initialization steps are skipped since a variation of them
+  was already performed during the upgrade
+  :return:
+  """
+  import params
+
+  environment={'OOZIE_CONFIG': params.conf_dir}
+
+  if params.security_enabled:
+    if params.oozie_principal is None:
+      oozie_principal_with_host = 'missing_principal'
+    else:
+      oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
+    kinit_if_needed = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host};")
+  else:
+    kinit_if_needed = ""
+
+  no_op_test = as_user(format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.oozie_user)
+
+  if action == 'start':
+    start_cmd = format("cd {oozie_tmp_dir} && {oozie_home}/bin/oozie-start.sh")
+
+    if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
+       params.jdbc_driver_name == "org.postgresql.Driver" or \
+       params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+      db_connection_check_command = format("{java_home}/bin/java -cp {check_db_connection_jar}:{target} org.apache.ambari.server.DBConnectionVerification '{oozie_jdbc_connection_url}' {oozie_metastore_user_name} {oozie_metastore_user_passwd!p} {jdbc_driver_name}")
+    else:
+      db_connection_check_command = None
+
+    if upgrade_type is None:
+
+      if not os.path.isfile(params.target) and params.jdbc_driver_name == "org.postgresql.Driver":
+        print format("ERROR: jdbc file {target} is unavailable. Please, follow next steps:\n" \
+          "1) Download postgresql-9.0-801.jdbc4.jar.\n2) Create needed directory: mkdir -p {oozie_home}/libserver/\n" \
+          "3) Copy postgresql-9.0-801.jdbc4.jar to newly created dir: cp /path/to/jdbc/postgresql-9.0-801.jdbc4.jar " \
+          "{oozie_home}/libserver/\n4) Copy postgresql-9.0-801.jdbc4.jar to libext: cp " \
+          "/path/to/jdbc/postgresql-9.0-801.jdbc4.jar {oozie_home}/libext/\n")
+        exit(1)
+
+      if db_connection_check_command:
+        Execute( db_connection_check_command,
+                 tries=5,
+                 try_sleep=10,
+                 user=params.oozie_user,
+        )
+
+      Execute( format("cd {oozie_tmp_dir} && {oozie_home}/bin/ooziedb.sh create -sqlfile oozie.sql -run"),
+               user = params.oozie_user, not_if = no_op_test,
+               ignore_failures = True
+      )
+
+      if params.security_enabled:
+        Execute(kinit_if_needed,
+                user = params.oozie_user,
+        )
+      if params.host_sys_prepped:
+        print "Skipping creation of oozie sharelib as host is sys prepped"
+        hdfs_share_dir_exists = True # skip time-expensive hadoop fs -ls check
+      elif WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
+        # checking with webhdfs is much faster than executing hadoop fs -ls.
+        util = WebHDFSUtil(params.hdfs_site, params.oozie_user, params.security_enabled)
+        list_status = util.run_command(params.hdfs_share_dir, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
+        hdfs_share_dir_exists = ('FileStatus' in list_status)
+      else:
+        # have to do the time-expensive hadoop fs -ls check.
+        hdfs_share_dir_exists = shell.call(format("{kinit_if_needed} hadoop --config {hadoop_conf_dir} dfs -ls {hdfs_share_dir} | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'"),
+                                 user=params.oozie_user)[0]
+
+      if not hdfs_share_dir_exists:
+        Execute( params.put_shared_lib_to_hdfs_cmd,
+                 user = params.oozie_user,
+                 path = params.execute_path
+        )
+        params.HdfsResource(format("{oozie_hdfs_user_dir}/share"),
+                             type="directory",
+                             action="create_on_execute",
+                             mode=0755,
+                             recursive_chmod=True,
+        )
+        params.HdfsResource(None, action="execute")
+
+
+    # start oozie
+    Execute( start_cmd, environment=environment, user = params.oozie_user,
+             not_if = no_op_test )
+
+  elif action == 'stop':
+    stop_cmd  = format("cd {oozie_tmp_dir} && {oozie_home}/bin/oozie-stop.sh")
+    # stop oozie
+    Execute(stop_cmd, environment=environment, only_if  = no_op_test,
+      user = params.oozie_user)
+    File(params.pid_file, action = "delete")
+    # Wait a bit longer for the database (Derby) to shut down completely, since it allows only one connected JVM.
+    time.sleep(10)
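
The no_op_test guard above encodes a common Ambari liveness check: the service counts
as running only if the pid file exists and the process it names is alive. A rough
plain-Python equivalent (the pid file path is illustrative):

    import os

    def is_oozie_running(pid_file="/var/run/oozie/oozie.pid"):
        # mirrors: ls {pid_file} && ps -p `cat {pid_file}`
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
        except (IOError, OSError, ValueError):
            return False
        try:
            os.kill(pid, 0)  # signal 0 probes existence without killing
            return True
        except OSError:
            return False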

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/params.py
new file mode 100755
index 0000000..febd3e6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/params.py
@@ -0,0 +1,259 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management import *
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.core import System
+from resource_management.libraries import Script
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import get_port_from_url
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.resources import HdfsResource
+from resource_management.libraries.functions import conf_select, stack_select
+from urlparse import urlparse
+import status_params
+import itertools
+import os
+import re
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+hostname = config["hostname"]
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+stack_name = default("/hostLevelParams/stack_name", None)
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version = format_stack_version(stack_version_unformatted)
+
+#hadoop params
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
+
+
+# if this is a server action, then use the server binaries; smoke tests
+# use the client binaries
+server_role_dir_mapping = { 'OOZIE_SERVER' : 'oozie-server',
+  'OOZIE_SERVICE_CHECK' : 'oozie-client' }
+
+command_role = default("/role", "")
+if command_role not in server_role_dir_mapping:
+  command_role = 'OOZIE_SERVICE_CHECK'
+
+oozie_root = server_role_dir_mapping[command_role]
+
+
+# using the correct oozie root dir, format the correct location
+oozie_lib_dir = format("/usr/iop/current/{oozie_root}")
+oozie_setup_sh = format("/usr/iop/current/{oozie_root}/bin/oozie-setup.sh")
+oozie_webapps_dir = format("/usr/iop/current/{oozie_root}/oozie-server/webapps")
+oozie_webapps_conf_dir = format("/usr/iop/current/{oozie_root}/oozie-server/conf")
+oozie_libext_dir = format("/usr/iop/current/{oozie_root}/libext")
+oozie_libext_customer_dir = format("/usr/iop/current/{oozie_root}/libext-customer")  # required by oozie_server_upgrade.py
+oozie_server_dir = format("/usr/iop/current/{oozie_root}/oozie-server")
+oozie_shared_lib = format("/usr/iop/current/{oozie_root}/share")
+oozie_home = format("/usr/iop/current/{oozie_root}")
+oozie_bin_dir = format("/usr/iop/current/{oozie_root}/bin")
+oozie_examples_regex = format("/usr/iop/current/{oozie_root}/doc")
+
+# set the falcon home for copying JARs; if in an upgrade, then use the version of falcon that matches the version of oozie
+falcon_home = '/usr/iop/current/falcon-client'
+if stack_version is not None:
+  falcon_home = '/usr/iop/{0}/falcon'.format(stack_version)
+execute_path = oozie_bin_dir + os.pathsep + hadoop_bin_dir
+
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+conf_dir = status_params.conf_dir
+hive_conf_dir = format("{conf_dir}/action-conf/hive")
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+oozie_admin_users = format(config['configurations']['oozie-env']['oozie_admin_users'])
+user_group = config['configurations']['cluster-env']['user_group']
+jdk_location = config['hostLevelParams']['jdk_location']
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+oozie_tmp_dir = "/var/tmp/oozie"
+oozie_hdfs_user_dir = format("/user/{oozie_user}")
+oozie_pid_dir = status_params.oozie_pid_dir
+pid_file = status_params.pid_file
+hadoop_jar_location = "/usr/lib/hadoop/"
+java_share_dir = "/usr/share/java"
+# Dependency on ext_js  not supported on IOP
+#ext_js_file = "ext-2.2.zip"
+#ext_js_path = "/usr/share/iop-oozie/ext-2.2.zip"
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+oozie_initial_heapsize = str(config['configurations']['oozie-env']['oozie_initial_heapsize']).rstrip('m') + 'm'
+oozie_heapsize = str(config['configurations']['oozie-env']['oozie_heapsize']).rstrip('m') + 'm'
+oozie_permsize = str(config['configurations']['oozie-env']['oozie_permsize']).rstrip('m') + 'm'
+
+kinit_path_local = get_kinit_path()
+oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
+oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
+http_principal = config['configurations']['oozie-site']['oozie.authentication.kerberos.principal']
+oozie_site = config['configurations']['oozie-site']
+if security_enabled:
+  #older versions of oozie have problems when using _HOST in principal
+  #by testing, newer versions of oozie also need this replacement
+  oozie_site = dict(config['configurations']['oozie-site'])
+  oozie_site['oozie.service.HadoopAccessorService.kerberos.principal'] = \
+    oozie_principal.replace('_HOST', hostname)
+  oozie_site['oozie.authentication.kerberos.principal'] = \
+    http_principal.replace('_HOST', hostname)
+
+smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+oozie_keytab = default("/configurations/oozie-env/oozie_keytab", oozie_service_keytab)
+oozie_env_sh_template = config['configurations']['oozie-env']['content']
+
+oracle_driver_jar_name = "ojdbc6.jar"
+
+java_home = config['hostLevelParams']['java_home']
+java_version = config['hostLevelParams']['java_version']
+oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
+oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
+oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
+oozie_log_dir = config['configurations']['oozie-env']['oozie_log_dir']
+oozie_data_dir = config['configurations']['oozie-env']['oozie_data_dir']
+oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
+oozie_server_admin_port = config['configurations']['oozie-env']['oozie_admin_port']
+if 'export OOZIE_HTTPS_PORT' in oozie_env_sh_template \
+    or 'oozie.https.port' in config['configurations']['oozie-site'] \
+    or 'oozie.https.keystore.file' in config['configurations']['oozie-site'] \
+    or 'oozie.https.keystore.pass' in config['configurations']['oozie-site']:
+  oozie_secure = '-secure'
+else:
+  oozie_secure = ''
+fs_root = config['configurations']['core-site']['fs.defaultFS']
+
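+# for a default install the command below expands to something like (values
+# illustrative):
+#   /usr/iop/current/oozie-client/bin/oozie-setup.sh sharelib create \
+#     -fs hdfs://namenode:8020 -locallib /usr/iop/current/oozie-client/share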
+put_shared_lib_to_hdfs_cmd = format("{oozie_setup_sh} sharelib create -fs {fs_root} -locallib {oozie_shared_lib}")
+
+jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
+
+if jdbc_driver_name == "com.mysql.jdbc.Driver":
+  #jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
+  jdbc_driver_jar = "mysql-connector-java.jar"
+  jdbc_symlink_name = "mysql-jdbc-driver.jar"
+elif jdbc_driver_name == "org.postgresql.Driver":
+  jdbc_driver_jar = format("{oozie_home}/libserver/postgresql-9.0-801.jdbc4.jar")  #oozie using it's own postgres jdbc
+  jdbc_symlink_name = "postgres-jdbc-driver.jar"
+elif jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+  #jdbc_driver_jar = "/usr/share/java/ojdbc6.jar"
+  jdbc_driver_jar = "ojdbc.jar"
+  jdbc_symlink_name = "oracle-jdbc-driver.jar"
+else:
+  jdbc_driver_jar = ""
+  jdbc_symlink_name = ""
+
+driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
+driver_curl_target = format("{java_share_dir}/{jdbc_driver_jar}")
+downloaded_custom_connector = format("{tmp_dir}/{jdbc_driver_jar}")
+if jdbc_driver_name == "org.postgresql.Driver":
+  target = jdbc_driver_jar
+else:
+  target = format("{oozie_libext_dir}/{jdbc_driver_jar}")
+
+hdfs_share_dir = format("{oozie_hdfs_user_dir}/share")
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
+has_falcon_host = len(falcon_host) > 0
+
+#oozie-log4j.properties
+if (('oozie-log4j' in config['configurations']) and ('content' in config['configurations']['oozie-log4j'])):
+  log4j_props = config['configurations']['oozie-log4j']['content']
+else:
+  log4j_props = None
+
+oozie_hdfs_user_mode = 0775
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+
+https_port = None
+# try to get the https port from the oozie-env content
+for line in oozie_env_sh_template.splitlines():
+  result = re.match(r"export\s+OOZIE_HTTPS_PORT=(\d+)", line)
+  if result is not None:
+    https_port = result.group(1)
+# or from oozie-site.xml
+if https_port is None and 'oozie.https.port' in config['configurations']['oozie-site']:
+  https_port = config['configurations']['oozie-site']['oozie.https.port']
+
+oozie_base_url = config['configurations']['oozie-site']['oozie.base.url']
+
+# construct proper url for https
+if https_port is not None:
+  parsed_url = urlparse(oozie_base_url)
+  oozie_base_url = oozie_base_url.replace(parsed_url.scheme, "https")
+  if parsed_url.port is None:
+    oozie_base_url = oozie_base_url.replace(parsed_url.hostname, ":".join([parsed_url.hostname, str(https_port)]))
+  else:
+    oozie_base_url = oozie_base_url.replace(str(parsed_url.port), str(https_port))
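+# example (values illustrative): with oozie.base.url = http://host:11000/oozie
+# and OOZIE_HTTPS_PORT=11443, oozie_base_url becomes https://host:11443/oozie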
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
+)
+
+#LZO support
+
+#-----LZO is not supported in the IOP distribution since it is GPL-licensed--------
+# hard-coded to False so that callers such as oozie_server_upgrade.py can still
+# reference params.lzo_enabled
+lzo_enabled = False
+
+'''
+io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
+lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+
+# stack_is_iop40_or_further
+underscored_version = stack_version_unformatted.replace('.', '_')
+dashed_version = stack_version_unformatted.replace('.', '-')
+lzo_packages_to_family = {
+  "any": ["hadoop-lzo", ],
+  "redhat": ["lzo", "hadoop-lzo-native"],
+  "suse": ["lzo", "hadoop-lzo-native"],
+  "ubuntu": ["liblzo2-2", ]
+}
+
+
+lzo_packages_to_family["redhat"] += [format("hadooplzo_{underscorred_version}_*")]
+lzo_packages_to_family["suse"] += [format("hadooplzo_{underscorred_version}_*")]
+lzo_packages_to_family["ubuntu"] += [format("hadooplzo_{dashed_version}_*")]
+
+lzo_packages_for_current_host = lzo_packages_to_family['any'] + lzo_packages_to_family[System.get_instance().os_family]
+all_lzo_packages = set(itertools.chain(*lzo_packages_to_family.values()))
+'''
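
The functools.partial block above is the standard params.py idiom: bind the
cluster-wide HdfsResource arguments once so each call site only supplies what
varies. The same pattern in isolation (names and values illustrative):

    import functools

    def hdfs_resource(path, user, security_enabled, action="create_on_execute", mode=0o755):
        print("%s %s as %s (secure=%s, mode=%o)" % (action, path, user, security_enabled, mode))

    # bind the constant, cluster-wide arguments once...
    HdfsResource = functools.partial(hdfs_resource, user="hdfs", security_enabled=False)

    # ...so call sites stay short, like params.HdfsResource(...) in the scripts above
    HdfsResource("/user/oozie/share")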

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/service_check.py
new file mode 100755
index 0000000..fdfd552
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/service_check.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import glob
+
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources import File
+from resource_management.core.source import StaticFile
+from resource_management.core.system import System
+from resource_management.libraries.functions import format
+from resource_management.libraries.script import Script
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.core.exceptions import Fail
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+
+from resource_management.core.logger import Logger
+
+class OozieServiceCheck(Script):
+  pass
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class OozieServiceCheckDefault(OozieServiceCheck):
+
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    prepare_hdfs_file_name = 'prepareOozieHdfsDirectories.sh'
+    smoke_test_file_name = 'oozieSmoke2.sh'
+
+    if 'yarn-site' in params.config['configurations']:
+      XmlConfig("yarn-site.xml",
+                conf_dir=params.hadoop_conf_dir,
+                configurations=params.config['configurations']['yarn-site'],
+                owner=params.hdfs_user,
+                group=params.user_group,
+                mode=0644
+      )
+    else:
+      raise Fail("yarn-site.xml was not present in config parameters.")
+
+    OozieServiceCheckDefault.oozie_smoke_shell_file(smoke_test_file_name, prepare_hdfs_file_name)
+
+  @staticmethod
+  def oozie_smoke_shell_file(file_name, prepare_hdfs_file_name):
+    import params
+
+    File(format("{tmp_dir}/{file_name}"),
+         content=StaticFile(file_name),
+         mode=0755
+    )
+    File(format("{tmp_dir}/{prepare_hdfs_file_name}"),
+         content=StaticFile(prepare_hdfs_file_name),
+         mode=0755
+    )
+
+    os_family = System.get_instance().os_family
+    oozie_examples_dir = glob.glob(params.oozie_examples_regex)[0]
+    oozie_examples_tar_file = os.path.join(oozie_examples_dir, "oozie-examples.tar.gz")
+    if not os.path.isfile(oozie_examples_tar_file):
+      oozie_examples_dir = glob.glob(os.path.join(oozie_examples_dir, "oozie-4.2.0_IBM*"))[0]
+
+    Execute(format("{tmp_dir}/{prepare_hdfs_file_name} {conf_dir} {oozie_examples_dir} {hadoop_conf_dir} "),
+            tries=3,
+            try_sleep=5,
+            logoutput=True
+    )
+
+    examples_dir = format('/user/{smokeuser}/examples')
+    params.HdfsResource(examples_dir,
+                        action = "delete_on_execute",
+                        type = "directory"
+    )
+    params.HdfsResource(examples_dir,
+      action = "create_on_execute",
+      type = "directory",
+      source = format("{oozie_examples_dir}/examples"),
+      owner = params.smokeuser,
+      group = params.user_group
+    )
+
+    input_data_dir = format('/user/{smokeuser}/input-data')
+    params.HdfsResource(input_data_dir,
+                        action = "delete_on_execute",
+                        type = "directory"
+    )
+    params.HdfsResource(input_data_dir,
+      action = "create_on_execute",
+      type = "directory",
+      source = format("{oozie_examples_dir}/examples/input-data"),
+      owner = params.smokeuser,
+      group = params.user_group
+    )
+    params.HdfsResource(None, action="execute")
+
+    if params.security_enabled:
+      sh_cmd = format(
+        "{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {oozie_base_url} {oozie_examples_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local} {smokeuser_principal}")
+    else:
+      sh_cmd = format(
+        "{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {oozie_base_url} {oozie_examples_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled}")
+
+    Execute(sh_cmd,
+            path=params.execute_path,
+            tries=3,
+            try_sleep=5,
+            logoutput=True
+    )
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class OozieServiceCheckWindows(OozieServiceCheck):
+
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+    smoke_cmd = os.path.join(params.hdp_root, "Run-SmokeTests.cmd")
+    service = "OOZIE"
+    Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True)
+
+if __name__ == "__main__":
+  OozieServiceCheck().execute()
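
The @OsFamilyImpl decorators above pick which service-check class runs based on the
host's OS family. A simplified sketch of that dispatch idea (this is not Ambari's
actual decorator, only the shape of it):

    import platform

    _IMPLS = {}

    def os_family_impl(family):
        # register an implementation class for a given OS family
        def register(cls):
            _IMPLS[family] = cls
            return cls
        return register

    @os_family_impl("default")
    class CheckDefault(object):
        def run(self):
            return "default service check"

    @os_family_impl("winsrv")
    class CheckWindows(object):
        def run(self):
            return "windows service check"

    family = "winsrv" if platform.system() == "Windows" else "default"
    print(_IMPLS[family]().run())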

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/status_params.py
new file mode 100755
index 0000000..58d5b86
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/status_params.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+
+# a map of the Ambari role to the component name
+# for use with /usr/iop/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'OOZIE_SERVER' : 'oozie-server',
+  'OOZIE_CLIENT' : 'oozie-client',
+  'OOZIE_SERVICE_CHECK' : 'oozie-client',
+  'ru_execute_tasks' : 'oozie-server'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "OOZIE_CLIENT")
+
+config = Script.get_config()
+
+oozie_pid_dir = config['configurations']['oozie-env']['oozie_pid_dir']
+pid_file = format("{oozie_pid_dir}/oozie.pid")
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+conf_dir = format("/usr/iop/current/{component_directory}/conf")
+tmp_dir = Script.get_tmp_dir()
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+hostname = config["hostname"]
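
Script.get_component_from_role resolves the current command's role to a component
directory, falling back to the default when the role is unmapped; the lookup amounts
to the sketch below (the helper body is illustrative):

    SERVER_ROLE_DIRECTORY_MAP = {
        'OOZIE_SERVER': 'oozie-server',
        'OOZIE_CLIENT': 'oozie-client',
        'OOZIE_SERVICE_CHECK': 'oozie-client',
        'ru_execute_tasks': 'oozie-server',
    }

    def get_component_from_role(role_map, default_role, command_role):
        # unmapped roles fall back to the default component
        return role_map.get(command_role, role_map[default_role])

    # OOZIE_SERVER -> conf_dir /usr/iop/current/oozie-server/conf
    print(get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, 'OOZIE_CLIENT', 'OOZIE_SERVER'))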

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/templates/adminusers.txt.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/templates/adminusers.txt.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/templates/adminusers.txt.j2
new file mode 100755
index 0000000..2a0f7b2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/templates/adminusers.txt.j2
@@ -0,0 +1,28 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Users should be set using following rules:
+#
+#     One user name per line
+#     Empty lines and lines starting with '#' are ignored
+
+{% if oozie_admin_users %}
+{% for oozie_admin_user in oozie_admin_users.split(',') %}
+{{oozie_admin_user|trim}}
+{% endfor %}
+{% endif %}
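
Rendered with a comma-separated admin list, the template emits one trimmed user name
per line. A quick check with the jinja2 package (assuming it is installed; the input
value is illustrative):

    from jinja2 import Template

    template = Template(
        "{% if oozie_admin_users %}"
        "{% for oozie_admin_user in oozie_admin_users.split(',') %}"
        "{{ oozie_admin_user | trim }}\n"
        "{% endfor %}"
        "{% endif %}"
    )
    print(template.render(oozie_admin_users="oozie, ambari-qa"))  # -> oozie\nambari-qa\n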

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/templates/oozie-log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/templates/oozie-log4j.properties.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/templates/oozie-log4j.properties.j2
new file mode 100755
index 0000000..e39428f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/templates/oozie-log4j.properties.j2
@@ -0,0 +1,93 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
+# XLogService sets its value to '${oozie.home}/logs'
+
+log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
+log4j.appender.oozie.Append=true
+log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n
+log4j.appender.oozie.RollingPolicy.FileNamePattern=${log4j.appender.oozie.File}-%d{yyyy-MM-dd}
+log4j.appender.oozie.DatePattern='.'yyyy-MM-dd
+
+log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
+log4j.appender.oozieops.Append=true
+log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
+log4j.appender.oozieinstrumentation.Append=true
+log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
+log4j.appender.oozieaudit.Append=true
+log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
+log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
+log4j.appender.openjpa.Append=true
+log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
+log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.logger.openjpa=INFO, openjpa
+log4j.logger.oozieops=INFO, oozieops
+log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
+log4j.logger.oozieaudit=ALL, oozieaudit
+log4j.logger.org.apache.oozie=INFO, oozie
+log4j.logger.org.apache.hadoop=WARN, oozie
+log4j.logger.org.mortbay=WARN, oozie
+log4j.logger.org.hsqldb=WARN, oozie
+log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-env.xml
new file mode 100755
index 0000000..4cec418
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-env.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- pig-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for pig-env.sh file</description>
+    <value>
+JAVA_HOME={{java64_home}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+#if [ -d "/usr/lib/tez" ]; then
+#  PIG_OPTS="$PIG_OPTS -Dmapreduce.framework.name=yarn"
+#fi
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-log4j.xml
new file mode 100755
index 0000000..4f656f4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-log4j.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# ***** Set the Pig logger level to info and its only appender to A.
+log4j.logger.org.apache.pig=info, A
+
+# ***** A is set to be a ConsoleAppender.
+log4j.appender.A=org.apache.log4j.ConsoleAppender
+# ***** A uses PatternLayout.
+log4j.appender.A.layout=org.apache.log4j.PatternLayout
+log4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
+    </value>
+    <value-attributes>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-properties.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-properties.xml
new file mode 100755
index 0000000..ea0fab5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-properties.xml
@@ -0,0 +1,631 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Pig configuration file. All values can be overwritten by command line
+# arguments; for a description of the properties, run
+#
+#     pig -h properties
+#
+
+############################################################################
+#
+# == Logging properties
+#
+
+# Location of pig log file. If blank, a file with a timestamped slug
+# ('pig_1399336559369.log') will be generated in the current working directory.
+#
+# pig.logfile=
+# pig.logfile=/tmp/pig-err.log
+
+# Log4j configuration file. Set at runtime with the -4 parameter. The source
+# distribution has a ./conf/log4j.properties.template file you can rename and
+# customize.
+#
+# log4jconf=./conf/log4j.properties
+
+# Verbose Output.
+# * false (default): print only INFO and above to screen
+# * true: Print all log messages to screen
+#
+# verbose=false
+
+# Omit timestamps on log messages. (default: false)
+#
+# brief=false
+
+# Logging level. debug=OFF|ERROR|WARN|INFO|DEBUG (default: INFO)
+#
+# debug=INFO
+
+# Roll up warnings across tasks, so that when millions of mappers suddenly cry
+# out in error they are partially silenced. (default, recommended: true)
+#
+# aggregate.warning=true
+
+# Should DESCRIBE pretty-print its schema?
+# * false (default): print on a single-line, suitable for pasting back in to your script
+# * true (recommended): prints on multiple lines with indentation, much more readable
+#
+# pig.pretty.print.schema=false
+
+# === Profiling UDFs  ===
+
+# Turn on UDF timers? This will cause two counters to be
+# tracked for every UDF and LoadFunc in your script: approx_microsecs measures
+# the approximate time spent inside a UDF; approx_invocations reports the
+# approximate number of times the UDF was invoked.
+#
+# * false (default): do not record timing information of UDFs.
+# * true: report UDF performance. Uses more counters, but gives more insight
+#   into script operation
+#
+# pig.udf.profile=false
+
+# Specify the frequency of profiling (default: every 100th invocation).
+# pig.udf.profile.frequency=100
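+#
+# For example, to profile UDFs on a single run without editing this file
+# (the script name below is illustrative only):
+#
+#     pig -Dpig.udf.profile=true -Dpig.udf.profile.frequency=50 my_script.pig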
+
+############################################################################
+#
+# == Site-specific Properties
+#
+
+# Execution Mode. Local mode is much faster, but only suitable for small amounts
+# of data. Local mode interprets paths on the local file system; MapReduce mode
+# interprets them on HDFS. Read more under 'Execution Modes' within the Getting Started
+# documentation.
+#
+# * mapreduce (default): use the Hadoop cluster defined in your Hadoop config files
+# * local: use local mode
+#
+# exectype=mapreduce
+
+# Bootstrap file with default statements to execute in every Pig job, similar to
+# .bashrc.  If blank, uses the file '.pigbootup' from your home directory; if a
+# value is supplied, that file is NOT loaded.  This does not do tilde expansion
+# -- you must supply the full path to the file.
+#
+# pig.load.default.statements=
+# pig.load.default.statements=/home/bob/.pigrc
+
+# Kill all waiting/running MR jobs upon a MR job failure? (default: false) If
+# false, jobs that can proceed independently will do so unless a parent stage
+# fails. If true, the failure of any stage in the script kills all jobs.
+#
+# stop.on.failure=false
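+#
+# For example, to fail fast on one run only (script name illustrative only):
+#
+#     pig -Dstop.on.failure=true fragile_pipeline.pig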
+
+# File containing the pig script to run. Rarely set in the properties file.
+# Commandline: -f
+#
+# file=
+
+# Jarfile to load, colon separated. Rarely used.
+#
+# jar=
+
+# Register additional .jar files to use with your Pig script.
+# Most typically used as a command line option (see http://pig.apache.org/docs/r0.12.0/basic.html#register):
+#
+#     pig -Dpig.additional.jars=hdfs://nn.mydomain.com:9020/myjars/my.jar
+#
+# pig.additional.jars=&lt;colon separated list of jars with optional wildcards&gt;
+# pig.additional.jars=/usr/local/share/pig/pig/contrib/piggybank/java/piggybank.jar:/usr/local/share/pig/datafu/datafu-pig/build/libs/datafu-pig-1.2.1.jar
+
+# Specify potential packages to which a UDF or a group of UDFs belong,
+# eliminating the need to qualify the UDF on every call. See
+# http://pig.apache.org/docs/r0.12.0/udf.html#use-short-names
+#
+# Commandline use:
+#
+#     pig \
+#       -Dpig.additional.jars=$PIG_HOME/contrib/piggybank/java/piggybank.jar:$PIG_HOME/../datafu/datafu-pig/build/libs/datafu-pig-1.2.1.jar \
+#       -Dudf.import.list=org.apache.pig.piggybank.evaluation:datafu.pig.util \
+#       happy_job.pig
+#
+# udf.import.list=&lt;colon separated list of imports&gt;
+# udf.import.list=org.apache.pig.piggybank.evaluation:datafu.pig.bags:datafu.pig.hash:datafu.pig.stats:datafu.pig.util
+
+#
+# Reuse jars across jobs run by the same user? (default: false) If enabled, jars
+# are placed in ${pig.user.cache.location}/${user.name}/.pigcache. Since most
+# jars change infrequently, this gives a minor speedup.
+#
+# pig.user.cache.enabled=false
+
+# Base path for storing jars cached by the pig.user.cache.enabled feature. (default: /tmp)
+#
+# pig.user.cache.location=/tmp
+
+# Replication factor for cached jars. If not specified, mapred.submit.replication
+# is used, whose default is 10.
+#
+# pig.user.cache.replication=10
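+#
+# For example, to enable the jar cache with a non-default location (the path
+# below is illustrative, not a required value):
+#
+# pig.user.cache.enabled=true
+# pig.user.cache.location=/var/tmp/pigcache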
+
+# Default UTC offset. (default: the host's current UTC offset) Supply a UTC
+# offset in Java's timezone format: e.g., +08:00.
+#
+# pig.datetime.default.tz=
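+# For example, to pin all jobs to UTC+8 (sample value only):
+# pig.datetime.default.tz=+08:00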
+
+############################################################################
+#
+# Memory impacting properties
+#
+
+# Amount of memory (as fraction of heap) allocated to bags before a spill is
+# forced. Default is 0.2, meaning 20% of available memory. Note that this memory
+# is shared across all large bags used by the application. See
+# http://pig.apache.org/docs/r0.12.0/perf.html#memory-management
+#
+# pig.cachedbag.memusage=0.2
+
+# Don't spill bags smaller than this size (bytes). Default: 5000000, or about
+# 5MB. Usually, the more spilling, the longer the runtime, so you may want to
+# tune this according to each task's heap size and so forth.
+#
+# pig.spill.size.threshold=5000000
+
+# EXPERIMENTAL: If a file bigger than this size (bytes) is spilled -- thus
+# freeing a bunch of ram -- tell the JVM to perform garbage collection.  This
+# should help reduce the number of files being spilled, but causes more-frequent
+# garbage collection. Default: 40000000 (about 40 MB)
+#
+# pig.spill.gc.activation.size=40000000
+
+# Maximum amount of data to replicate using the distributed cache when doing
+# fragment-replicated join. (default: 1000000000, about 1GB) Consider increasing
+# this in a production environment, but carefully.
+#
+# pig.join.replicated.max.bytes=1000000000
+
+# Fraction of heap available for the reducer to perform a skewed join. A low
+# fraction forces Pig to use more reducers, but increases the copying cost. See
+# http://pig.apache.org/docs/r0.12.0/perf.html#skewed-joins
+#
+# pig.skewedjoin.reduce.memusage=0.3
+
+#
+# === SchemaTuple ===
+#
+# The SchemaTuple feature (PIG-2632) uses a tuple's schema (when known) to
+# generate a custom Java class to hold records. Otherwise, tuples are loaded as
+# a plain list that is unaware of its contents' schema -- and so each element
+# has to be wrapped as a Java object on its own. This can provide more efficient
+# CPU utilization, serialization, and most of all memory usage.
+#
+# This feature is considered experimental and is off by default. You can
+# selectively enable it for specific operations using pig.schematuple.udf,
+# pig.schematuple.load, pig.schematuple.fr_join and pig.schematuple.merge_join
+#
+
+# Enable the SchemaTuple optimization in all available cases? (default: false; recommended: true)
+#
+# pig.schematuple=false
+
+# EXPERIMENTAL: Use SchemaTuples with UDFs (default: value of pig.schematuple).
+# pig.schematuple.udf=false
+
+# EXPERIMENTAL, CURRENTLY NOT IMPLEMENTED, but in the future, LoadFuncs with
+# known schemas should output SchemaTuples. (default: value of pig.schematuple)
+# pig.schematuple.load=false
+
+# EXPERIMENTAL: Use SchemaTuples in replicated joins. The potential memory
+# saving here is significant. (default: value of pig.schematuple)
+# pig.schematuple.fr_join=false
+
+# EXPERIMENTAL: Use SchemaTuples in merge joins. (default: value of pig.schematuple).
+# pig.schematuple.merge_join=false
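+#
+# For example, to try SchemaTuples only in replicated joins (where the
+# memory savings are largest) while leaving the global switch off:
+#
+# pig.schematuple.fr_join=true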
+
+############################################################################
+#
+# Serialization options
+#
+
+# Omit empty part files from the output? (default: false)
+#
+# * false (default): each reducer generates an output file, even if its output is empty
+# * true (recommended): do not generate zero-byte part files
+#
+# The default behavior of MapReduce is to generate an empty file for no data, so
+# Pig follows that. But many small files can cause annoying extra map tasks and
+# put load on the HDFS, so consider setting this to 'true'.
+#
+# pig.output.lazy=false
+
+#
+# === Tempfile Handling
+#
+
+# EXPERIMENTAL: Storage format for temporary files generated by intermediate
+# stages of Pig jobs. This can provide significant speed increases for certain
+# codecs, as reducing the amount of data transferred to and from disk can more
+# than make up for the cost of compression/decompression. We recommend setting
+# up LZO compression in Hadoop and specifying tfile storage.
+#
+# Compress temporary files?
+# * false (default): do not compress
+# * true (recommended): compress temporary files.
+#
+# pig.tmpfilecompression=false
+# pig.tmpfilecompression=true
+
+# Tempfile storage container type.
+#
+# * tfile (default, recommended): more efficient, but only supports gz (gzip) and lzo compression.
+#   https://issues.apache.org/jira/secure/attachment/12396286/TFile%20Specification%2020081217.pdf
+# * seqfile: only supports gz (gzip), lzo, snappy, and bzip2 compression
+#
+# pig.tmpfilecompression.storage=tfile
+
+# Codec types for intermediate job files. tfile supports gz (gzip) and lzo;
+# seqfile supports gz (gzip), lzo, snappy, and bzip2.
+#
+# * lzo (recommended with caveats): moderate compression, low CPU burden;
+#   typically leads to a noticeable speedup. Best default choice, but you must
+#   set up LZO independently due to license incompatibility.
+# * snappy: moderate compression, low CPU burden; typically leads to a noticeable speedup.
+# * gz (default): higher compression, high CPU burden. Typically leads to a noticeable slowdown.
+# * bzip2: most compression, major CPU burden. Typically leads to a noticeable slowdown.
+#
+# pig.tmpfilecompression.codec=gzip
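+#
+# For example, a typical production combination, assuming LZO has been
+# installed and configured in Hadoop separately:
+#
+# pig.tmpfilecompression=true
+# pig.tmpfilecompression.storage=tfile
+# pig.tmpfilecompression.codec=lzo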
+
+#
+# === Split Combining
+#
+
+#
+# Should pig try to combine small files for fewer map tasks? This improves the
+# efficiency of jobs with many small input files, reduces the overhead on the
+# jobtracker, and reduces the number of output files a map-only job
+# produces. However, it only works with certain loaders and increases non-local
+# map tasks. See http://pig.apache.org/docs/r0.12.0/perf.html#combine-files
+#
+# * false (default, recommended): _do_ combine files
+# * true: do not combine files
+#
+# pig.noSplitCombination=false
+
+#
+# Size, in bytes, of data to be processed by a single map. Smaller files are
+# combined until this size is reached. If unset, defaults to the file system's
+# default block size.
+#
+# pig.maxCombinedSplitSize=
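+# For example, to combine small files into splits of up to 256MB (value in
+# bytes, illustrative only):
+# pig.maxCombinedSplitSize=268435456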
+
+# ###########################################################################
+#
+# Execution options
+#
+
+# Should pig omit combiners? (default, recommended: false -- meaning pig _will_
+# use combiners)
+#
+# When combiners work well, they eliminate a significant amount of
+# data. However, if they do not eliminate much data -- say, a DISTINCT operation
+# that only eliminates 5% of the records -- they add a noticeable overhead to
+# the job. So the recommended default is false (use combiners), selectively
+# disabling them per-job:
+#
+#     pig -Dpig.exec.nocombiner=true distinct_but_not_too_much.pig
+#
+# pig.exec.nocombiner=false
+
+# EXPERIMENTAL: Aggregate records in map task before sending to the combiner?
+# (default: false, 10; recommended: true, 10). In cases where there is a massive
+# reduction of data in the aggregation step, pig can do a first pass of
+# aggregation before the data even leaves the mapper, saving much serialization
+# overhead. It's off by default but can give a major improvement to
+# group-and-aggregate operations. Pig skips partial aggregation unless reduction
+# is better than a factor of minReduction (default: 10). See
+# http://pig.apache.org/docs/r0.12.0/perf.html#hash-based-aggregation
+#
+# pig.exec.mapPartAgg=false
+# pig.exec.mapPartAgg.minReduction=10
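+#
+# For example, to try in-mapper aggregation on a single group-and-aggregate
+# job (script name illustrative only):
+#
+#     pig -Dpig.exec.mapPartAgg=true -Dpig.exec.mapPartAgg.minReduction=5 rollup.pig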
+
+#
+# === Control how many reducers are used.
+#
+
+# Estimate number of reducers naively using a fixed amount of data per
+# reducer. Optimally, you have both fewer reducers than available reduce slots,
+# and reducers that are neither getting too little data (less than a half-GB or
+# so) nor too much data (more than 2-3 times the reducer child process max heap
+# size). The default of 1000000000 (about 1GB) is probably low for a production
+# cluster -- however it's much worse to set this too high (reducers spill many
+# times over in group-sort) than too low (delay waiting for reduce slots).
+#
+# pig.exec.reducers.bytes.per.reducer=1000000000
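+#
+# Worked example: with 50GB of input and the default of about 1GB per
+# reducer, Pig estimates roughly 50 reducers; doubling the setting to
+# 2000000000 would halve that to roughly 25, subject to the
+# pig.exec.reducers.max cap below.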
+
+#
+# Don't ever use more than this many reducers. (default: 999)
+#
+# pig.exec.reducers.max=999
+
+#
+# === Local mode for small jobs
+#
+
+# EXPERIMENTAL: Use local mode for small jobs? If true, jobs with input data
+# size smaller than pig.auto.local.input.maxbytes bytes and one or no reducers
+# are run in local mode, which is much faster. Note that file paths are still
+# interpreted as pig.exectype implies.
+#
+# * true (recommended): allow local mode for small jobs, which is much faster.
+# * false (default): always use pig.exectype.
+#
+# pig.auto.local.enabled=false
+
+#
+# Definition of a small job for the pig.auto.local.enabled feature. Only jobs
+# with less input than this many bytes are candidates to run locally (default:
+# 100000000 bytes, about 100MB)
+#
+# pig.auto.local.input.maxbytes=100000000
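+#
+# For example, to run only jobs with under 64MB of input locally (values
+# illustrative only):
+#
+# pig.auto.local.enabled=true
+# pig.auto.local.input.maxbytes=67108864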
+
+############################################################################
+#
+# Security Features
+#
+
+# Comma-delimited list of commands/operators that are disallowed. This security
+# feature can be used by administrators to block use of certain commands by
+# users.
+#
+# * &lt;blank&gt; (default): all commands and operators are allowed.
+# * fs,set (for example): block all filesystem commands and config changes from pig scripts.
+#
+# pig.blacklist=
+# pig.blacklist=fs,set
+
+# Comma-delimited list of the only commands/operators that are allowed. This
+# security feature can be used by administrators to restrict users to a
+# fixed set of commands.
+#
+# * &lt;blank&gt; (default): all commands and operators not on the pig.blacklist are allowed.
+# * load,store,filter,group: only LOAD, STORE, FILTER, and GROUP are allowed
+#   from pig scripts. All other commands and operators will fail.
+#
+# pig.whitelist=
+# pig.whitelist=load,store,filter,group
+
+#####################################################################
+#
+# Advanced Site-specific Customizations
+#
+
+# Remove intermediate output files?
+#
+# * true (default, recommended): remove the files
+# * false: do NOT remove the files. You must clean them up yourself.
+#
+# Keeping them is useful for advanced debugging, but can be dangerous -- you
+# must clean them up yourself.  Inspect the intermediate outputs with
+#
+#     LOAD '/path/to/tmp/file' USING org.apache.pig.impl.io.TFileStorage();
+#
+# (Or ...SequenceFileInterStorage if pig.tmpfilecompression.storage is seqfile)
+#
+# pig.delete.temp.files=true
+
+# EXPERIMENTAL: A Pig Progress Notification Listener (PPNL) lets you wire pig's
+# progress into your visibility stack. To use a PPNL, supply the fully qualified
+# class name of a PPNL implementation. Note that only one PPNL can be set up, so
+# if you need several, write a PPNL that will chain them.
+#
+# See https://github.com/twitter/ambrose for a pretty awesome one of these
+#
+# pig.notification.listener=&lt;fully qualified class name of a PPNL implementation&gt;
+
+# String argument to pass to your PPNL constructor (optional). Only a single
+# string value is allowed. (default none)
+#
+# pig.notification.listener.arg=&lt;somevalue&gt;
+
+# EXPERIMENTAL: Class invoked to estimate the number of reducers to use.
+# (default: org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.InputSizeReducerEstimator)
+#
+# If you don't know how or why to write a PigReducerEstimator, you're unlikely
+# to use this. By default, the naive mapReduceLayer.InputSizeReducerEstimator is
+# used, but you can specify anything implementing the interface
+# org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigReducerEstimator
+#
+# pig.exec.reducer.estimator=&lt;fully qualified class name of a PigReducerEstimator implementation&gt;
+
+# Optional String argument to pass to your PigReducerEstimator. (default: none;
+# a single String argument is allowed).
+#
+# pig.exec.reducer.estimator.arg=&lt;somevalue&gt;
+
+# Class invoked to report the size of reducers output. By default, the reducers'
+# output is computed as the total size of output files. But not every storage is
+# file-based, and so this logic can be replaced by implementing the interface
+# org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigStatsOutputSizeReader
+# If you need to register more than one reader, you can register them as a comma
+# separated list. Every reader implements a boolean supports(POStore sto) method.
+# When there are more than one reader, they are consulted in order, and the
+# first one whose supports() method returns true will be used.
+#
+# pig.stats.output.size.reader=&lt;fully qualified class name of a PigStatsOutputSizeReader implementation&gt;
+# pig.stats.output.size.reader.unsupported=&lt;comma separated list of StoreFuncs that are not supported by this reader&gt;
+
+# By default, Pig retrieves TaskReports for every launched task to compute
+# various job statistics. But this can cause OOM if the number of tasks is
+# large. In such cases, you can disable it by setting this property to true.
+# pig.stats.notaskreport=false
+
+#
+# Override hadoop configs programmatically
+#
+# By default, Pig expects hadoop configs (hadoop-site.xml and core-site.xml)
+# to be present on the classpath. There are cases where these configs need to
+# be passed programmatically, such as when using the PigServer API. In such
+# cases, you can override hadoop configs by setting the property
+# "pig.use.overriden.hadoop.configs".
+#
+# When this property is set to true, Pig skips looking for hadoop configs
+# on the classpath and instead picks them up from the Properties/Configuration
+# object passed to it.
+#
+# pig.use.overriden.hadoop.configs=false
+
+# Implied LoadFunc for the LOAD operation when no USING clause is
+# present. Supply the fully qualified class name of a LoadFunc
+# implementation. Note: setting this means you will have to modify most code
+# brought in from elsewhere on the web, as people generally omit the USING
+# clause for TSV files.
+#
+# * org.apache.pig.builtin.PigStorage (default): the traditional tab-separated-values LoadFunc
+# * my.custom.udfcollection.MyCustomLoadFunc (for example): use MyCustomLoadFunc instead
+#
+# pig.default.load.func=&lt;fully qualified class name of a LoadFunc implementation&gt;
+
+# The implied StoreFunc for STORE operations with no USING clause. Supply the
+# fully qualified class name of a StoreFunc implementation.
+#
+# * org.apache.pig.builtin.PigStorage (default): the traditional tab-separated-values StoreFunc.
+# * my.custom.udfcollection.MyCustomStoreFunc (for example): use MyCustomStoreFunc instead
+#
+# pig.default.store.func=&lt;fully qualified class name of a StoreFunc implementation&gt;
+
+# Recover jobs when the application master is restarted? (default: false). This
+# is a Hadoop 2 specific property; enable it to take advantage of AM recovery.
+#
+# pig.output.committer.recovery.support=true
+
+# Should scripts check to prevent multiple stores writing to the same location?
+# (default: false) When set to true, execution of the script stops right away.
+#
+pig.location.check.strict=false
+
+# In addition to the fs-style commands (rm, ls, etc.), Pig can now execute
+# SQL-style DDL commands, e.g. "sql create table pig_test(name string, age int)".
+# The only implemented backend is hcat, and luckily that's also the default.
+#
+# pig.sql.type=hcat
+
+# Path to the hcat executable, for use with pig.sql.type=hcat (default: null)
+#
+hcat.bin=/usr/local/hcat/bin/hcat
+
+###########################################################################
+#
+# Overrides for extreme environments
+#
+# (Most people won't have to adjust these parameters)
+#
+
+
+# Limit the pig script length placed in the jobconf xml. (default: 10240)
+# Extremely long queries can waste space in the JobConf; since its contents are
+# only advisory, the default is fine unless you are retaining it for forensics.
+#
+# pig.script.max.size=10240
+
+# Disable use of counters by Pig. Note that the word 'counter' is singular here.
+#
+# * false (default, recommended): do NOT disable counters.
+# * true: disable counters. Set this to true only when your Pig job would
+#   otherwise die from using more counters than the Hadoop-configured limit.
+#
+# pig.disable.counter=true
+
+# Sample size (per-mapper, in number of rows) the ORDER..BY operation's
+# RandomSampleLoader uses to estimate how your data should be
+# partitioned. (default, recommended: 100 rows per task) Increase this if you
+# have exceptionally large input splits and are unhappy with the reducer skew.
+#
+# pig.random.sampler.sample.size=100
+
+# Process an entire script at once, reducing the amount of work and number of
+# tasks? (default, recommended: true) See http://pig.apache.org/docs/r0.12.0/perf.html#multi-query-execution
+#
+# MultiQuery optimization is very useful, and so the recommended default is
+# true. You may find that a script fails to compile under MultiQuery. If so,
+# disable it at runtime:
+#
+#     pig -no_multiquery script_that_makes_pig_sad.pig
+#
+# opt.multiquery=true
+
+# For small queries, fetch data directly from the HDFS. (default, recommended:
+# true). If you want to force Pig to launch a MR job, for example when you're
+# testing a live cluster, disable with the -N option. See PIG-3642.
+#
+# opt.fetch=true
+
+###########################################################################
+#
+# Streaming properties
+#
+
+# Properties to set in the streaming environment. Set this property to a
+# comma-delimited list of property names, and those properties will be
+# exported to the streaming job's environment.
+#
+# pig.streaming.environment=&lt;comma-delimited list of properties&gt;
+
+# Specify a comma-delimited list of local files to ship to distributed cache for
+# streaming job.
+#
+# pig.streaming.ship.files=&lt;comma-delimited list of local files&gt;
+
+# Specify a comma-delimited list of remote files to cache on distributed cache
+# for streaming job.
+#
+# pig.streaming.cache.files=&lt;comma-delimited list of remote files&gt;
+
+# Specify the python command to be used for python streaming UDFs. By default,
+# python is used, but you can override it with a non-default version such as
+# python2.7.
+#
+# pig.streaming.udf.python.command=python
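+#
+# For example, to point streaming UDFs at a specific interpreter (the path
+# is illustrative only):
+# pig.streaming.udf.python.command=/usr/bin/python2.7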
+
+    </value>
+    <description>Contents of the pig.properties configuration file for the Pig client.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/kerberos.json
new file mode 100755
index 0000000..22dd6cb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/kerberos.json
@@ -0,0 +1,17 @@
+{
+  "services": [
+    {
+      "name": "PIG",
+      "components": [
+        {
+          "name": "PIG",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/metainfo.xml
new file mode 100755
index 0000000..85318de
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/metainfo.xml
@@ -0,0 +1,86 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>PIG</name>
+      <displayName>Pig</displayName>
+      <comment>Scripting platform for analyzing large datasets</comment>
+      <version>0.15.0</version>
+      <components>
+        <component>
+          <name>PIG</name>
+          <displayName>Pig</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/pig_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>env</type>
+              <fileName>pig-env.sh</fileName>
+              <dictionaryName>pig-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>pig-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>pig.properties</fileName>
+              <dictionaryName>pig-properties</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>pig</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>YARN</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>pig-env</config-type>
+        <config-type>pig-log4j</config-type>
+        <config-type>pig-properties</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/files/pigSmoke.sh
new file mode 100755
index 0000000..a22456e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/files/pigSmoke.sh
@@ -0,0 +1,18 @@
+/*Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License */
+
+-- Load the local 'passwd' file, splitting fields on ':'
+A = load 'passwd' using PigStorage(':');
+-- Project the first field (the user name) as 'id'
+B = foreach A generate \$0 as id;
+-- Store the projection into 'pigsmoke.out'
+store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/params.py
new file mode 100755
index 0000000..89ab726
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/params.py
@@ -0,0 +1,25 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management.libraries.functions.default import default
+
+from params_linux import *
+
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
\ No newline at end of file