Posted to commits@ambari.apache.org by mr...@apache.org on 2017/11/27 23:29:33 UTC

[21/30] ambari git commit: Merge trunk with feature branch and fix some UT compilation issues (mradhakrishnan)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-agent/src/main/python/ambari_agent/HostInfo.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/HostInfo.py b/ambari-agent/src/main/python/ambari_agent/HostInfo.py
index b8545cc..4a752fc 100644
--- a/ambari-agent/src/main/python/ambari_agent/HostInfo.py
+++ b/ambari-agent/src/main/python/ambari_agent/HostInfo.py
@@ -57,6 +57,9 @@ class HostInfo(object):
     self.config = config
     self.reportFileHandler = HostCheckReportFileHandler(config)
 
+  def register(self, dict_obj, componentsMapped=True, commandsInProgress=True):
+    raise NotImplementedError()
+
   def dirType(self, path):
     if not os.path.exists(path):
       return 'not_exist'
@@ -245,21 +248,20 @@ class HostInfoLinux(HostInfo):
         cmd = cmd.replace('\0', ' ')
         if not 'AmbariServer' in cmd:
           if 'java' in cmd:
-            dict = {}
-            dict['pid'] = int(pid)
-            dict['hadoop'] = False
+            metrics = {}
+            metrics['pid'] = int(pid)
+            metrics['hadoop'] = False
             for filter in self.PROC_FILTER:
               if filter in cmd:
-                dict['hadoop'] = True
-            dict['command'] = unicode(cmd.strip(), errors='ignore')
+                metrics['hadoop'] = True
+            metrics['command'] = unicode(cmd.strip(), errors='ignore')
             for line in open(os.path.join('/proc', pid, 'status')):
               if line.startswith('Uid:'):
                 uid = int(line.split()[1])
-                dict['user'] = pwd.getpwuid(uid).pw_name
-            list.append(dict)
+                metrics['user'] = pwd.getpwuid(uid).pw_name
+            list.append(metrics)
     except:
       logger.exception("Checking java processes failed")
-    pass
 
   def getTransparentHugePage(self):
     thp_regex = "\[(.+)\]"
@@ -312,54 +314,54 @@ class HostInfoLinux(HostInfo):
       logger.exception('Unable to get information about JCE')
       return None
 
-  def register(self, dict, componentsMapped=True, commandsInProgress=True):
+  def register(self, metrics, componentsMapped=True, commandsInProgress=True):
     """ Return various details about the host
     componentsMapped: indicates if any components are mapped to this host
     commandsInProgress: indicates if any commands are in progress
     """
 
-    dict['hostHealth'] = {}
+    metrics['hostHealth'] = {}
 
     java = []
     self.javaProcs(java)
-    dict['hostHealth']['activeJavaProcs'] = java
+    metrics['hostHealth']['activeJavaProcs'] = java
 
     liveSvcs = []
     self.checkLiveServices(self.DEFAULT_LIVE_SERVICES, liveSvcs)
-    dict['hostHealth']['liveServices'] = liveSvcs
+    metrics['hostHealth']['liveServices'] = liveSvcs
 
-    dict['umask'] = str(self.getUMask())
+    metrics['umask'] = str(self.getUMask())
 
-    dict['transparentHugePage'] = self.getTransparentHugePage()
-    dict['firewallRunning'] = self.checkFirewall()
-    dict['firewallName'] = self.getFirewallName()
-    dict['reverseLookup'] = self.checkReverseLookup()
-    dict['hasUnlimitedJcePolicy'] = self.checkUnlimitedJce()
+    metrics['transparentHugePage'] = self.getTransparentHugePage()
+    metrics['firewallRunning'] = self.checkFirewall()
+    metrics['firewallName'] = self.getFirewallName()
+    metrics['reverseLookup'] = self.checkReverseLookup()
+    metrics['hasUnlimitedJcePolicy'] = self.checkUnlimitedJce()
     # If commands are in progress or components are already mapped to this host
     # Then do not perform certain expensive host checks
     if componentsMapped or commandsInProgress:
-      dict['alternatives'] = []
-      dict['stackFoldersAndFiles'] = []
-      dict['existingUsers'] = []
+      metrics['alternatives'] = []
+      metrics['stackFoldersAndFiles'] = []
+      metrics['existingUsers'] = []
 
     else:
       etcs = []
       self.etcAlternativesConf(self.DEFAULT_PROJECT_NAMES, etcs)
-      dict['alternatives'] = etcs
+      metrics['alternatives'] = etcs
 
       existingUsers = []
       self.checkUsers(self.DEFAULT_USERS, existingUsers)
-      dict['existingUsers'] = existingUsers
+      metrics['existingUsers'] = existingUsers
 
       dirs = []
       self.checkFolders(self.DEFAULT_BASEDIRS, self.DEFAULT_PROJECT_NAMES, self.EXACT_DIRECTORIES, existingUsers, dirs)
-      dict['stackFoldersAndFiles'] = dirs
+      metrics['stackFoldersAndFiles'] = dirs
 
-      self.reportFileHandler.writeHostCheckFile(dict)
+      self.reportFileHandler.writeHostCheckFile(metrics)
       pass
 
     # The time stamp must be recorded at the end
-    dict['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
+    metrics['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
 
     pass
 
@@ -434,43 +436,42 @@ class HostInfoWindows(HostInfo):
     code, out, err = run_powershell_script(self.SERVICE_STATUS_CMD.format(serivce_name))
     return out, err, code
 
-  def register(self, dict, componentsMapped=True, commandsInProgress=True):
+  def register(self, metrics, componentsMapped=True, commandsInProgress=True):
     """ Return various details about the host
     componentsMapped: indicates if any components are mapped to this host
     commandsInProgress: indicates if any commands are in progress
     """
-    dict['hostHealth'] = {}
+    metrics['hostHealth'] = {}
 
     java = []
     self.javaProcs(java)
-    dict['hostHealth']['activeJavaProcs'] = java
+    metrics['hostHealth']['activeJavaProcs'] = java
 
     liveSvcs = []
     self.checkLiveServices(self.DEFAULT_LIVE_SERVICES, liveSvcs)
-    dict['hostHealth']['liveServices'] = liveSvcs
+    metrics['hostHealth']['liveServices'] = liveSvcs
 
-    dict['umask'] = str(self.getUMask())
+    metrics['umask'] = str(self.getUMask())
 
-    dict['firewallRunning'] = self.checkFirewall()
-    dict['firewallName'] = self.getFirewallName()
-    dict['reverseLookup'] = self.checkReverseLookup()
+    metrics['firewallRunning'] = self.checkFirewall()
+    metrics['firewallName'] = self.getFirewallName()
+    metrics['reverseLookup'] = self.checkReverseLookup()
     # If commands are in progress or components are already mapped to this host
     # Then do not perform certain expensive host checks
     if componentsMapped or commandsInProgress:
-      dict['alternatives'] = []
-      dict['stackFoldersAndFiles'] = []
-      dict['existingUsers'] = []
+      metrics['alternatives'] = []
+      metrics['stackFoldersAndFiles'] = []
+      metrics['existingUsers'] = []
     else:
       existingUsers = []
       self.checkUsers(self.DEFAULT_USERS, existingUsers)
-      dict['existingUsers'] = existingUsers
+      metrics['existingUsers'] = existingUsers
       # TODO check HDP stack and folders here
-      self.reportFileHandler.writeHostCheckFile(dict)
+      self.reportFileHandler.writeHostCheckFile(metrics)
       pass
 
     # The time stamp must be recorded at the end
-    dict['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
-
+    metrics['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
 
 
 def main(argv=None):

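A note on the main change in this hunk: the local variable previously named `dict` shadowed the Python builtin of the same name, so any later `dict(...)` call in that scope would fail. A minimal standalone illustration of the hazard the `metrics` rename removes (hypothetical values, compatible with the agent's Python 2):

```python
def broken_report():
    dict = {'pid': 1234}           # rebinds the name 'dict' locally
    try:
        dict(hadoop=True)          # TypeError: 'dict' object is not callable
    except TypeError as e:
        return str(e)

def fixed_report():
    metrics = {'pid': 1234}        # distinct name leaves the builtin intact
    metrics.update(dict(hadoop=True))
    return metrics

print(broken_report())
print(fixed_report())
```
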
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py b/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py
index 05f8023..5c0305e 100644
--- a/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py
+++ b/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py
@@ -463,7 +463,7 @@ class BaseAlert(object):
       # get the host for dfs.namenode.http-address.c1ha.nn1 and see if it's
       # this host
       value = self._get_configuration_value(key)
-      if value is not None and (self.host_name in value or self.public_host_name in value):
+      if value is not None and (self.host_name.lower() in value.lower() or self.public_host_name.lower() in value.lower()):
         return AlertUri(uri=value, is_ssl_enabled=is_ssl_enabled)
 
     return None

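The base_alert.py change makes the host match case-insensitive, since hostnames recorded in configuration values can differ in case from the agent's own hostname. A small illustration of the behavioral difference (sample values, not the alert framework itself):

```python
host_name = 'C6401.ambari.apache.org'
value = 'c6401.ambari.apache.org:50070'    # e.g. dfs.namenode.http-address

print(host_name in value)                  # False: case-sensitive miss
print(host_name.lower() in value.lower())  # True: DNS names compare equal
```
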
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-agent/src/test/python/ambari_agent/TestAmbariAgent.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestAmbariAgent.py b/ambari-agent/src/test/python/ambari_agent/TestAmbariAgent.py
index 8ff192a..56a73fc 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestAmbariAgent.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestAmbariAgent.py
@@ -35,8 +35,7 @@ class TestAmbariAgent(unittest.TestCase):
   @patch("os.path.isfile")
   @patch("os.remove")
   @patch("os.killpg")
-  @patch("os.setpgrp")
-  def test_main(self, os_setpgrp_mock, os_killpg_mock, os_remove_mock,
+  def test_main(self, os_killpg_mock, os_remove_mock,
                 os_path_isfile_mock, subprocess_popen_mock):
     facter1 = MagicMock()
     facter2 = MagicMock()
@@ -49,7 +48,6 @@ class TestAmbariAgent(unittest.TestCase):
     sys.argv[0] = "test data"
     AmbariAgent.main()
 
-    self.assertTrue(os_setpgrp_mock.called)
     self.assertTrue(subprocess_popen_mock.called)
     self.assertTrue(subprocess_popen_mock.call_count == 2)
     self.assertTrue(facter1.communicate.called)

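Note that dropping @patch("os.setpgrp") also drops the first mock parameter of test_main, because stacked @patch decorators inject their mocks bottom-up (the decorator closest to the function supplies the first argument). A minimal sketch of that ordering rule, assuming the same mock library these tests use:

```python
from mock import patch, MagicMock

@patch("os.killpg")
@patch("os.remove")
def check_ordering(os_remove_mock, os_killpg_mock):
    # bottom-most decorator ("os.remove") maps to the first parameter
    assert isinstance(os_remove_mock, MagicMock)
    assert isinstance(os_killpg_mock, MagicMock)

check_ordering()
```
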
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-agent/src/test/python/ambari_agent/TestHardware.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestHardware.py b/ambari-agent/src/test/python/ambari_agent/TestHardware.py
index 5400e26..e78f8f2 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestHardware.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestHardware.py
@@ -26,14 +26,12 @@ import unittest
 import platform
 import socket
 import subprocess
-import os
 from only_for_platform import not_for_platform, PLATFORM_WINDOWS
 from ambari_agent import hostname
 from ambari_agent.Hardware import Hardware
 from ambari_agent.AmbariConfig import AmbariConfig
 from ambari_agent.Facter import Facter, FacterLinux
 from ambari_commons import OSCheck
-from resource_management.core import shell
 
 
 @not_for_platform(PLATFORM_WINDOWS)
@@ -61,10 +59,10 @@ class TestHardware(TestCase):
   def test_build(self, get_os_version_mock, get_os_type_mock):
     get_os_type_mock.return_value = "suse"
     get_os_version_mock.return_value = "11"
-    config = None
-    hardware = Hardware(config)
+    hardware = Hardware()
     result = hardware.get()
     osdisks = hardware.osdisks()
+
     for dev_item in result['mounts']:
       self.assertTrue(dev_item['available'] >= 0)
       self.assertTrue(dev_item['used'] >= 0)
@@ -113,7 +111,33 @@ class TestHardware(TestCase):
     chk_writable_mount_mock.side_effect = chk_writable_mount_side_effect
     shell_call_mock.return_value = (0, df_output, '')
 
-    result = Hardware.osdisks()
+    result = Hardware(cache_info=False).osdisks()
+
+    self.assertEquals(1, len(result))
+
+    expected_mounts_left = ["/"]
+    mounts_left = [item["mountpoint"] for item in result]
+
+    self.assertEquals(expected_mounts_left, mounts_left)
+
+  @patch.object(Hardware, "_chk_writable_mount")
+  @patch("ambari_agent.Hardware.path_isfile")
+  @patch("resource_management.core.shell.call")
+  def test_osdisks_no_ignore_property(self, shell_call_mock, isfile_mock, chk_writable_mount_mock):
+    df_output = \
+      """Filesystem                                                                                        Type  1024-blocks     Used Available Capacity Mounted on
+      /dev/mapper/docker-253:0-4980899-d45c264d37ab18c8ed14f890f4d59ac2b81e1c52919eb36a79419787209515f3 xfs      31447040  1282384  30164656       5% /
+      """
+
+    isfile_mock.return_value = False
+    chk_writable_mount_mock.return_value = True
+    shell_call_mock.return_value = (0, df_output, '')
+    config = AmbariConfig()
+
+    # check that config does not define the ignore_mount_points property
+    self.assertEquals("test", config.get('agent', 'ignore_mount_points', default="test"))
+
+    result = Hardware(config=config, cache_info=False).osdisks()
 
     self.assertEquals(1, len(result))
 
@@ -128,35 +152,35 @@ class TestHardware(TestCase):
   def test_osdisks_remote(self, shell_call_mock, get_os_version_mock, get_os_type_mock):
     get_os_type_mock.return_value = "suse"
     get_os_version_mock.return_value = "11"
-    Hardware.osdisks()
+    Hardware(cache_info=False).osdisks()
     timeout = 10
     shell_call_mock.assert_called_with(['timeout', str(timeout), "df", "-kPT"], stdout = subprocess.PIPE, stderr = subprocess.PIPE, timeout = timeout, quiet = True)
 
     config = AmbariConfig()
-    Hardware.osdisks(config)
+    Hardware(config=config, cache_info=False).osdisks()
     shell_call_mock.assert_called_with(['timeout', str(timeout), "df", "-kPT"], stdout = subprocess.PIPE, stderr = subprocess.PIPE, timeout = timeout, quiet = True)
 
     config.add_section(AmbariConfig.AMBARI_PROPERTIES_CATEGORY)
     config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY, "true")
-    Hardware.osdisks(config)
+    Hardware(config=config, cache_info=False).osdisks()
     shell_call_mock.assert_called_with(['timeout', str(timeout), "df", "-kPT"], stdout = subprocess.PIPE, stderr = subprocess.PIPE, timeout = timeout, quiet = True)
 
     config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY, "false")
-    Hardware.osdisks(config)
+    Hardware(config=config, cache_info=False).osdisks()
     shell_call_mock.assert_called_with(['timeout', str(timeout), "df", "-kPT", "-l"], stdout = subprocess.PIPE, stderr = subprocess.PIPE, timeout = timeout, quiet = True)
 
     config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, "0")
-    Hardware.osdisks(config)
+    Hardware(config=config, cache_info=False).osdisks()
     shell_call_mock.assert_called_with(['timeout', str(timeout), "df", "-kPT", "-l"], stdout = subprocess.PIPE, stderr = subprocess.PIPE, timeout = timeout, quiet = True)
 
     timeout = 1
     config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, str(timeout))
-    Hardware.osdisks(config)
+    Hardware(config=config, cache_info=False).osdisks()
     shell_call_mock.assert_called_with(['timeout', str(timeout), "df", "-kPT", "-l"], stdout = subprocess.PIPE, stderr = subprocess.PIPE, timeout = timeout, quiet = True)
 
     timeout = 2
     config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, str(timeout))
-    Hardware.osdisks(config)
+    Hardware(config=config, cache_info=False).osdisks()
     shell_call_mock.assert_called_with(['timeout', str(timeout), "df", "-kPT", "-l"], stdout = subprocess.PIPE, stderr = subprocess.PIPE, timeout = timeout, quiet = True)
 
   def test_parse_df_line(self):
@@ -182,7 +206,11 @@ class TestHardware(TestCase):
     ]
 
     for sample in samples:
-      result = Hardware._parse_df_line(sample["sample"])
+      try:
+        result = Hardware(cache_info=False)._parse_df([sample["sample"]]).next()
+      except StopIteration:
+        result = None
+
       self.assertEquals(result, sample["expected"], "Failed with sample: '{0}', expected: {1}, got: {2}".format(
         sample["sample"],
         sample["expected"],
@@ -430,7 +458,7 @@ SwapFree:        1598676 kB
     }
     conf.configure_mock(**attr)
 
-    result = Hardware.osdisks(conf)
+    result = Hardware(config=conf, cache_info=False).osdisks()
 
     self.assertEquals(1, len(result))
 

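These tests reflect an API change visible throughout this patch: Hardware.osdisks() is no longer called statically; a Hardware instance is constructed with an optional config and a cache_info flag. Based only on the calls in this diff, usage now looks roughly like:

```python
from ambari_agent.Hardware import Hardware
from ambari_agent.AmbariConfig import AmbariConfig

config = AmbariConfig()

# cache_info=False appears to force a fresh `df` probe on every osdisks()
# call, which the tests need when they swap the mocked df output between cases
hardware = Hardware(config=config, cache_info=False)
for mount in hardware.osdisks():
    print(mount["mountpoint"])
```
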
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-agent/src/test/python/ambari_agent/TestRegistration.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestRegistration.py b/ambari-agent/src/test/python/ambari_agent/TestRegistration.py
index a5c23fa..fada29c 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestRegistration.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestRegistration.py
@@ -19,7 +19,6 @@ limitations under the License.
 '''
 
 from unittest import TestCase
-import os
 import tempfile
 from mock.mock import patch
 from mock.mock import MagicMock
@@ -35,13 +34,14 @@ class TestRegistration(TestCase):
 
   @patch("subprocess.Popen")
   @patch.object(Hardware, "_chk_writable_mount", new = MagicMock(return_value=True))
+  @patch("__builtin__.open", new=MagicMock())
   @patch.object(FacterLinux, "facterInfo", new = MagicMock(return_value={}))
   @patch.object(FacterLinux, "__init__", new = MagicMock(return_value = None))
   @patch("resource_management.core.shell.call")
   @patch.object(OSCheck, "get_os_type")
   @patch.object(OSCheck, "get_os_version")
   def test_registration_build(self, get_os_version_mock, get_os_type_mock, run_os_cmd_mock, Popen_mock):
-    config = AmbariConfig().getConfig()
+    config = AmbariConfig()
     tmpdir = tempfile.gettempdir()
     config.set('agent', 'prefix', tmpdir)
     config.set('agent', 'current_ping_port', '33777')
@@ -58,7 +58,6 @@ class TestRegistration(TestCase):
     self.assertEquals(data['timestamp'] > 1353678475465L, True, "timestamp should not be empty")
     self.assertEquals(len(data['agentEnv']) > 0, True, "agentEnv should not be empty")
     self.assertEquals(data['agentVersion'], reference_version, "agentVersion should not be empty")
-    print data['agentEnv']['umask']
     self.assertEquals(not data['agentEnv']['umask']== "", True, "agents umask should not be empty")
     self.assertEquals(data['currentPingPort'] == 33777, True, "current ping port should be 33777")
     self.assertEquals(data['prefix'], config.get('agent', 'prefix'), 'The prefix path does not match')

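The added @patch("__builtin__.open", new=MagicMock()) stubs out the global open() for the duration of the test so that registration does not touch real files (__builtin__ is the Python 2 spelling; Python 3 would patch builtins.open). The same technique in isolation:

```python
from mock import patch, MagicMock

@patch("__builtin__.open", new=MagicMock())
def read_without_touching_disk():
    f = open("/proc/meminfo")  # returns a MagicMock, no real I/O happens
    return f.read()            # also a MagicMock, not file contents

print(read_without_touching_disk())
```
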
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py b/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
index 9117510..367e2af 100644
--- a/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
+++ b/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
@@ -46,8 +46,8 @@ REMOVE_WITHOUT_DEPENDENCIES_CMD = ['rpm', '-e', '--nodeps']
 
 YUM_REPO_LOCATION = "/etc/yum.repos.d"
 REPO_UPDATE_CMD = ['/usr/bin/yum', 'clean', 'metadata']
-ALL_INSTALLED_PACKAGES_CMD = [AMBARI_SUDO_BINARY, "yum", "list", "installed"]
-ALL_AVAILABLE_PACKAGES_CMD = [AMBARI_SUDO_BINARY, "yum", "list", "available"]
+ALL_INSTALLED_PACKAGES_CMD = [AMBARI_SUDO_BINARY, "yum", "list", "installed", "--noplugins"]
+ALL_AVAILABLE_PACKAGES_CMD = [AMBARI_SUDO_BINARY, "yum", "list", "available", "--noplugins"]
 VERIFY_DEPENDENCY_CMD = ['/usr/bin/yum', '-d', '0', '-e', '0', 'check', 'dependencies']
 
 # base command output sample:
@@ -138,7 +138,7 @@ class YumProvider(RPMBasedPackageProvider):
     :rtype list[list,]
     """
 
-    cmd = [AMBARI_SUDO_BINARY, "yum", "list", "available"]
+    cmd = list(ALL_AVAILABLE_PACKAGES_CMD)
 
     if repo_filter:
       cmd.extend(["--disablerepo=*", "--enablerepo=" + repo_filter])
@@ -154,7 +154,7 @@ class YumProvider(RPMBasedPackageProvider):
     :rtype list[list,]
     """
 
-    packages = self._lookup_packages([AMBARI_SUDO_BINARY, "yum", "list", "installed"], "Installed Packages")
+    packages = self._lookup_packages(list(ALL_INSTALLED_PACKAGES_CMD), "Installed Packages")
     if repo_filter:
       packages = [item for item in packages if item[2].lower() == repo_filter.lower()]
 

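Reusing the module-level command constants is only safe because each call site copies them with list(...) before extending; extending the shared list in place would leak --disablerepo flags into every subsequent invocation. An illustration of the aliasing bug the copy avoids (with "sudo" standing in for AMBARI_SUDO_BINARY):

```python
ALL_AVAILABLE_PACKAGES_CMD = ["sudo", "yum", "list", "available", "--noplugins"]

def build_cmd_buggy(repo_filter=None):
    cmd = ALL_AVAILABLE_PACKAGES_CMD        # alias, not a copy!
    if repo_filter:
        cmd.extend(["--disablerepo=*", "--enablerepo=" + repo_filter])
    return cmd

def build_cmd_fixed(repo_filter=None):
    cmd = list(ALL_AVAILABLE_PACKAGES_CMD)  # independent copy, constant untouched
    if repo_filter:
        cmd.extend(["--disablerepo=*", "--enablerepo=" + repo_filter])
    return cmd

build_cmd_buggy("HDP-2.6")
print(ALL_AVAILABLE_PACKAGES_CMD)  # now polluted with the repo filter flags
```
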
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-common/src/main/python/resource_management/libraries/functions/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/__init__.py b/ambari-common/src/main/python/resource_management/libraries/functions/__init__.py
index f144b2d..b907844 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/__init__.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/__init__.py
@@ -39,7 +39,7 @@ from resource_management.libraries.functions.version import *
 from resource_management.libraries.functions.format_jvm_option import *
 from resource_management.libraries.functions.constants import *
 from resource_management.libraries.functions.get_stack_version import *
-from resource_management.libraries.functions.get_lzo_packages import *
+from resource_management.libraries.functions.lzo_utils import *
 from resource_management.libraries.functions.setup_ranger_plugin import *
 from resource_management.libraries.functions.curl_krb_request import *
 from resource_management.libraries.functions.get_bare_principal import *

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
index c89e767..d38e273 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
@@ -294,6 +294,64 @@ def convert_conf_directories_to_symlinks(package, version, dirs):
       Logger.warning("Could not change symlink for package {0} to point to current directory. Error: {1}".format(package, e))
 
 
+def get_restricted_packages():
+  """
+  Gets the list of conf-select 'package' names that need to be invoked on the command.
+  When the server passes down the list of packages to install, check the service names
+  and use the information in stack_packages json to determine the list of packages that should
+  be executed.  That is valid only for PATCH or MAINT upgrades.  STANDARD upgrades should
+  conf-select everything they can find.
+  """
+  package_names = []
+
+  # shortcut the common case if we are not patching
+  cluster_version_summary = default("/roleParameters/cluster_version_summary/services", None)
+
+  if cluster_version_summary is None:
+    Logger.info("Cluster Summary is not available, there are no restrictions for conf-select")
+    return package_names
+
+  service_names = []
+
+  # pick out the services that are targeted
+  for servicename, servicedetail in cluster_version_summary.iteritems():
+    if servicedetail['upgrade']:
+      service_names.append(servicename)
+
+  if 0 == len(service_names):
+    Logger.info("No services found, there are no restrictions for conf-select")
+    return package_names
+
+  stack_name = default("/hostLevelParams/stack_name", None)
+  if stack_name is None:
+    Logger.info("The stack name is not present in the command. Restricted names skipped.")
+    return package_names
+
+  stack_packages_config = default("/configurations/cluster-env/stack_packages", None)
+  if stack_packages_config is None:
+    Logger.info("The stack packages are not defined on the command. Restricted names skipped.")
+    return package_names
+
+  data = json.loads(stack_packages_config)
+
+  if stack_name not in data:
+    Logger.info("Cannot find conf-select packages for the {0} stack".format(stack_name))
+    return package_names
+
+  conf_select_key = "conf-select-patching"
+  if conf_select_key not in data[stack_name]:
+    Logger.info("There are no conf-select-patching elements defined for this command for the {0} stack".format(stack_name))
+    return package_names
+
+  service_dict = data[stack_name][conf_select_key]
+
+  for servicename in service_names:
+    if servicename in service_dict and 'packages' in service_dict[servicename]:
+      package_names.extend(service_dict[servicename]['packages'])
+
+  return package_names
+
+
 def _seed_new_configuration_directories(package, created_directories):
   """
   Copies any files from the "current" configuration directory to the directories which were

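For reference, the nested structure that get_restricted_packages() walks would look roughly like the guessed illustration below (a hypothetical stack_packages payload, not the shipped file):

```python
import json

stack_packages_config = json.dumps({
    "HDP": {
        "conf-select-patching": {
            "HIVE": {"packages": ["hive", "hive-hcatalog"]},
            "SPARK": {"packages": ["spark"]}
        }
    }
})

data = json.loads(stack_packages_config)
service_names = ["HIVE"]  # services marked 'upgrade' in the version summary

package_names = []
service_dict = data["HDP"]["conf-select-patching"]
for servicename in service_names:
    if servicename in service_dict and 'packages' in service_dict[servicename]:
        package_names.extend(service_dict[servicename]['packages'])

print(package_names)  # ['hive', 'hive-hcatalog']
```
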
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
index 219430a..c15fbd1 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
@@ -23,18 +23,184 @@ __all__ = ["copy_to_hdfs", "get_sysprep_skip_copy_tarballs_hdfs"]
 import os
 import tempfile
 import re
+import tarfile
+from contextlib import closing
 
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import component_version
+from resource_management.libraries.functions import lzo_utils
 from resource_management.libraries.functions.default import default
 from resource_management.core import shell
+from resource_management.core import sudo
 from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Directory
+from resource_management.core.resources.system import Execute
 from resource_management.libraries.functions import stack_tools, stack_features, stack_select
+from resource_management.libraries.functions import tar_archive
 
 STACK_NAME_PATTERN = "{{ stack_name }}"
 STACK_ROOT_PATTERN = "{{ stack_root }}"
 STACK_VERSION_PATTERN = "{{ stack_version }}"
 
+def _prepare_tez_tarball():
+  """
+  Prepares the Tez tarball by adding the Hadoop native libraries found in the mapreduce tarball.
+  It's very important to use the version of mapreduce which matches tez here.
+  Additionally, this will also copy native LZO to the tez tarball if LZO is enabled and the
+  GPL license has been accepted.
+  :return:  the full path of the newly created tez tarball to use
+  """
+  import tempfile
+
+  Logger.info("Preparing the Tez tarball...")
+
+  # get the mapreduce tarball which matches the version of tez
+  # tez installs the mapreduce tar, so it should always be present
+  _, mapreduce_source_file, _, _ = get_tarball_paths("mapreduce")
+  _, tez_source_file, _, _ = get_tarball_paths("tez")
+
+  temp_dir = Script.get_tmp_dir()
+
+  # create the temp staging directories ensuring that non-root agents using tarfile can work with them
+  mapreduce_temp_dir = tempfile.mkdtemp(prefix="mapreduce-tarball-", dir=temp_dir)
+  tez_temp_dir = tempfile.mkdtemp(prefix="tez-tarball-", dir=temp_dir)
+  sudo.chmod(mapreduce_temp_dir, 0777)
+  sudo.chmod(tez_temp_dir, 0777)
+
+  Logger.info("Extracting {0} to {1}".format(mapreduce_source_file, mapreduce_temp_dir))
+  tar_archive.extract_archive(mapreduce_source_file, mapreduce_temp_dir)
+
+  Logger.info("Extracting {0} to {1}".format(tez_source_file, tez_temp_dir))
+  tar_archive.untar_archive(tez_source_file, tez_temp_dir)
+
+  hadoop_lib_native_dir = os.path.join(mapreduce_temp_dir, "hadoop", "lib", "native")
+  tez_lib_dir = os.path.join(tez_temp_dir, "lib")
+
+  if not os.path.exists(hadoop_lib_native_dir):
+    raise Fail("Unable to seed the Tez tarball with native libraries since the source Hadoop native lib directory {0} does not exist".format(hadoop_lib_native_dir))
+
+  if not os.path.exists(tez_lib_dir):
+    raise Fail("Unable to seed the Tez tarball with native libraries since the target Tez lib directory {0} does not exist".format(tez_lib_dir))
+
+  # copy native libraries from hadoop to tez
+  Execute(("cp", "-a", hadoop_lib_native_dir, tez_lib_dir), sudo = True)
+
+  # if enabled, LZO GPL libraries must be copied as well
+  if lzo_utils.should_install_lzo():
+    stack_root = Script.get_stack_root()
+    tez_version = component_version.get_component_repository_version("TEZ")
+    hadoop_lib_native_lzo_dir = os.path.join(stack_root, tez_version, "hadoop", "lib", "native")
+
+    if not sudo.path_isdir(hadoop_lib_native_lzo_dir):
+      Logger.warning("Unable to located native LZO libraries at {0}, falling back to hadoop home".format(hadoop_lib_native_lzo_dir))
+      hadoop_lib_native_lzo_dir = os.path.join(stack_root, "current", "hadoop-client", "lib", "native")
+
+    if not sudo.path_isdir(hadoop_lib_native_lzo_dir):
+      raise Fail("Unable to seed the Tez tarball with native libraries since LZO is enabled but the native LZO libraries could not be found at {0}".format(hadoop_lib_native_lzo_dir))
+
+    Execute(("cp", "-a", hadoop_lib_native_lzo_dir, tez_lib_dir), sudo = True)
+
+
+  # ensure that the tez/lib directory is readable by non-root (which it typically is not)
+  Directory(tez_lib_dir,
+    mode = 0755,
+    cd_access = 'a',
+    recursive_ownership = True)
+
+  # create the staging directory so that non-root agents can write to it
+  tez_native_tarball_staging_dir = os.path.join(temp_dir, "tez-native-tarball-staging")
+  if not os.path.exists(tez_native_tarball_staging_dir):
+    Directory(tez_native_tarball_staging_dir,
+      mode = 0777,
+      cd_access='a',
+      create_parents = True,
+      recursive_ownership = True)
+
+  tez_tarball_with_native_lib = os.path.join(tez_native_tarball_staging_dir, "tez-native.tar.gz")
+  Logger.info("Creating a new Tez tarball at {0}".format(tez_tarball_with_native_lib))
+
+  # tar up Tez, making sure to specify nothing for the arcname so that it does not include an absolute path
+  with closing(tarfile.open(tez_tarball_with_native_lib, "w:gz")) as new_tez_tarball:
+    new_tez_tarball.add(tez_temp_dir, arcname=os.path.sep)
+
+  # ensure that the tarball can be read and uploaded
+  sudo.chmod(tez_tarball_with_native_lib, 0744)
+  
+  # cleanup
+  sudo.rmtree(mapreduce_temp_dir)
+  sudo.rmtree(tez_temp_dir)
+
+  return tez_tarball_with_native_lib
+
+
+def _prepare_mapreduce_tarball():
+  """
+  Prepares the mapreduce tarball by including the native LZO libraries if necessary. If LZO is
+  not enabled or has not been opted-in, then this will do nothing and return the original
+  tarball to upload to HDFS.
+  :return:  the full path of the newly created mapreduce tarball to use or the original path
+  if no changes were made
+  """
+  # get the mapreduce tarball to crack open and add LZO libraries to
+  _, mapreduce_source_file, _, _ = get_tarball_paths("mapreduce")
+
+  if not lzo_utils.should_install_lzo():
+    return mapreduce_source_file
+
+  Logger.info("Preparing the mapreduce tarball with native LZO libraries...")
+
+  temp_dir = Script.get_tmp_dir()
+
+  # create the temp staging directories ensuring that non-root agents using tarfile can work with them
+  mapreduce_temp_dir = tempfile.mkdtemp(prefix="mapreduce-tarball-", dir=temp_dir)
+  sudo.chmod(mapreduce_temp_dir, 0777)
+
+  # calculate the source directory for LZO
+  hadoop_lib_native_source_dir = os.path.join(os.path.dirname(mapreduce_source_file), "lib", "native")
+  if not sudo.path_exists(hadoop_lib_native_source_dir):
+    raise Fail("Unable to seed the mapreduce tarball with native LZO libraries since the source Hadoop native lib directory {0} does not exist".format(hadoop_lib_native_source_dir))
+
+  Logger.info("Extracting {0} to {1}".format(mapreduce_source_file, mapreduce_temp_dir))
+  tar_archive.extract_archive(mapreduce_source_file, mapreduce_temp_dir)
+
+  mapreduce_lib_dir = os.path.join(mapreduce_temp_dir, "hadoop", "lib")
+
+  # copy native libraries from source hadoop to target
+  Execute(("cp", "-af", hadoop_lib_native_source_dir, mapreduce_lib_dir), sudo = True)
+
+  # ensure that the hadoop/lib/native directory is readable by non-root (which it typically is not)
+  Directory(mapreduce_lib_dir,
+    mode = 0755,
+    cd_access = 'a',
+    recursive_ownership = True)
+
+  # create the staging directory so that non-root agents can write to it
+  mapreduce_native_tarball_staging_dir = os.path.join(temp_dir, "mapreduce-native-tarball-staging")
+  if not os.path.exists(mapreduce_native_tarball_staging_dir):
+    Directory(mapreduce_native_tarball_staging_dir,
+      mode = 0777,
+      cd_access = 'a',
+      create_parents = True,
+      recursive_ownership = True)
+
+  mapreduce_tarball_with_native_lib = os.path.join(mapreduce_native_tarball_staging_dir, "mapreduce-native.tar.gz")
+  Logger.info("Creating a new mapreduce tarball at {0}".format(mapreduce_tarball_with_native_lib))
+
+  # tar up mapreduce, making sure to specify nothing for the arcname so that it does not include an absolute path
+  with closing(tarfile.open(mapreduce_tarball_with_native_lib, "w:gz")) as new_tarball:
+    new_tarball.add(mapreduce_temp_dir, arcname = os.path.sep)
+
+  # ensure that the tarball can be read and uploaded
+  sudo.chmod(mapreduce_tarball_with_native_lib, 0744)
+
+  # cleanup
+  sudo.rmtree(mapreduce_temp_dir)
+
+  return mapreduce_tarball_with_native_lib
+
+
 # TODO, in the future, each stack can define its own mapping of tarballs
 # inside the stack definition directory in some sort of xml file.
 # PLEASE DO NOT put this in cluster-env since it becomes much harder to change,
@@ -50,7 +216,8 @@ TARBALL_MAP = {
   "tez": {
     "dirs": ("{0}/{1}/tez/lib/tez.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
            "/{0}/apps/{1}/tez/tez.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
-    "service": "TEZ"
+    "service": "TEZ",
+    "prepare_function": _prepare_tez_tarball
   },
 
   "tez_hive2": {
@@ -86,7 +253,8 @@ TARBALL_MAP = {
   "mapreduce": {
     "dirs": ("{0}/{1}/hadoop/mapreduce.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
                 "/{0}/apps/{1}/mapreduce/mapreduce.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
-    "service": "MAPREDUCE2"
+    "service": "MAPREDUCE2",
+    "prepare_function": _prepare_mapreduce_tarball
   },
 
   "spark": {
@@ -132,29 +300,29 @@ def get_tarball_paths(name, use_upgrading_version_during_upgrade=True, custom_so
   :param use_upgrading_version_during_upgrade:
   :param custom_source_file: If specified, use this source path instead of the default one from the map.
   :param custom_dest_file: If specified, use this destination path instead of the default one from the map.
-  :return: A tuple of (success status, source path, destination path)
+  :return: A tuple of (success status, source path, destination path, optional preparation function which is invoked to setup the tarball)
   """
   stack_name = Script.get_stack_name()
 
   if not stack_name:
     Logger.error("Cannot copy {0} tarball to HDFS because stack name could not be determined.".format(str(name)))
-    return (False, None, None)
+    return False, None, None
 
   if name is None or name.lower() not in TARBALL_MAP:
     Logger.error("Cannot copy tarball to HDFS because {0} is not supported in stack {1} for this operation.".format(str(name), str(stack_name)))
-    return (False, None, None)
+    return False, None, None
 
   service = TARBALL_MAP[name.lower()]['service']
 
   stack_version = get_current_version(service=service, use_upgrading_version_during_upgrade=use_upgrading_version_during_upgrade)
   if not stack_version:
     Logger.error("Cannot copy {0} tarball to HDFS because stack version could be be determined.".format(str(name)))
-    return (False, None, None)
+    return False, None, None
 
   stack_root = Script.get_stack_root()
   if not stack_root:
     Logger.error("Cannot copy {0} tarball to HDFS because stack root could be be determined.".format(str(name)))
-    return (False, None, None)
+    return False, None, None
 
   (source_file, dest_file) = TARBALL_MAP[name.lower()]['dirs']
 
@@ -173,7 +341,11 @@ def get_tarball_paths(name, use_upgrading_version_during_upgrade=True, custom_so
   source_file = source_file.replace(STACK_VERSION_PATTERN, stack_version)
   dest_file = dest_file.replace(STACK_VERSION_PATTERN, stack_version)
 
-  return (True, source_file, dest_file)
+  prepare_function = None
+  if "prepare_function" in TARBALL_MAP[name.lower()]:
+    prepare_function = TARBALL_MAP[name.lower()]['prepare_function']
+
+  return True, source_file, dest_file, prepare_function
 
 
 def get_current_version(service=None, use_upgrading_version_during_upgrade=True):
@@ -272,8 +444,8 @@ def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=Non
   import params
 
   Logger.info("Called copy_to_hdfs tarball: {0}".format(name))
-  (success, source_file, dest_file) = get_tarball_paths(name, use_upgrading_version_during_upgrade,
-                                                         custom_source_file, custom_dest_file)
+  (success, source_file, dest_file, prepare_function) = get_tarball_paths(name, use_upgrading_version_during_upgrade,
+                                                                          custom_source_file, custom_dest_file)
 
   if not success:
     Logger.error("Could not copy tarball {0} due to a missing or incorrect parameter.".format(str(name)))
@@ -311,6 +483,9 @@ def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=Non
   # The logic above cannot be used until fast-hdfs-resource.jar supports the mv command, or it switches
   # to WebHDFS.
 
+  # if there is a function which is needed to prepare the tarball, then invoke it first
+  if prepare_function is not None:
+    source_file = prepare_function()
 
   # If the directory already exists, it is a NO-OP
   dest_dir = os.path.dirname(dest_file)

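Callers of get_tarball_paths() now unpack a 4-tuple; the optional fourth element, when present, is invoked to rebuild the tarball before upload. A condensed sketch of the new call pattern (mirrors the copy_to_hdfs change above; requires the ambari-common libraries and a loaded agent command context):

```python
from resource_management.libraries.functions.copy_tarball import get_tarball_paths

success, source_file, dest_file, prepare_function = get_tarball_paths(
    "tez", use_upgrading_version_during_upgrade=True)

if success and prepare_function is not None:
    # for "tez" this is _prepare_tez_tarball, which re-packs tez.tar.gz
    # with the Hadoop native libraries (and native LZO, if enabled)
    source_file = prepare_function()
# source_file and dest_file are then handed to HdfsResource for the upload
```
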
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-common/src/main/python/resource_management/libraries/functions/curl_krb_request.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/curl_krb_request.py b/ambari-common/src/main/python/resource_management/libraries/functions/curl_krb_request.py
index 95e8625..55395ce 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/curl_krb_request.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/curl_krb_request.py
@@ -21,28 +21,20 @@ Ambari Agent
 """
 
 __all__ = ["curl_krb_request"]
+import hashlib
 import logging
 import os
+
 import time
 
+from get_kinit_path import get_kinit_path
+from get_klist_path import get_klist_path
 from resource_management.core import global_lock
 from resource_management.core import shell
 from resource_management.core.exceptions import Fail
-from get_kinit_path import get_kinit_path
-from get_klist_path import get_klist_path
 from resource_management.libraries.functions.get_user_call_output import get_user_call_output
 
-# hashlib is supplied as of Python 2.5 as the replacement interface for md5
-# and other secure hashes.  In 2.6, md5 is deprecated.  Import hashlib if
-# available, avoiding a deprecation warning under 2.6.  Import md5 otherwise,
-# preserving 2.4 compatibility.
-try:
-  import hashlib
-  _md5 = hashlib.md5
-except ImportError:
-  import md5
-  _md5 = md5.new
-
+HASH_ALGORITHM = hashlib.sha224
 CONNECTION_TIMEOUT_DEFAULT = 10
 MAX_TIMEOUT_DEFAULT = CONNECTION_TIMEOUT_DEFAULT + 2
 
@@ -103,10 +95,10 @@ def curl_krb_request(tmp_dir, keytab, principal, url, cache_file_prefix,
   is_kinit_required = False
 
   # Create the kerberos credentials cache (ccache) file and set it in the environment to use
-  # when executing curl. Use the md5 hash of the combination of the principal and keytab file
+  # when executing curl. Use a hash of the combination of the principal and keytab file
   # to generate a (relatively) unique cache filename so that we can use it as needed. Scope
   # this file by user in order to prevent sharing of cache files by multiple users.
-  ccache_file_name = _md5("{0}|{1}".format(principal, keytab)).hexdigest()
+  ccache_file_name = HASH_ALGORITHM("{0}|{1}".format(principal, keytab)).hexdigest()
 
   curl_krb_cache_path = os.path.join(tmp_dir, "curl_krb_cache")
   if not os.path.exists(curl_krb_cache_path):

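The hashing change itself is simple: sha224 over "principal|keytab" yields a stable, per-credential cache filename without the old md5 import shim. A standalone equivalent (Python 2 string handling, matching the agent):

```python
import hashlib

HASH_ALGORITHM = hashlib.sha224

principal = "nn/c6401.ambari.apache.org@EXAMPLE.COM"  # sample values
keytab = "/etc/security/keytabs/nn.service.keytab"

ccache_file_name = HASH_ALGORITHM("{0}|{1}".format(principal, keytab)).hexdigest()
print(ccache_file_name)  # 56 hex chars, deterministic for the same pair
```
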
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-common/src/main/python/resource_management/libraries/functions/get_lzo_packages.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/get_lzo_packages.py b/ambari-common/src/main/python/resource_management/libraries/functions/get_lzo_packages.py
deleted file mode 100644
index cfbb7d8..0000000
--- a/ambari-common/src/main/python/resource_management/libraries/functions/get_lzo_packages.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-__all__ = ["get_lzo_packages"]
-
-from ambari_commons.os_check import OSCheck
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.script.script import Script
-
-# TODO: Make list of lzo packages stack driven
-def get_lzo_packages(stack_version_unformatted):
-  lzo_packages = []
-  script_instance = Script.get_instance()
-  if OSCheck.is_suse_family() and int(OSCheck.get_os_major_version()) >= 12:
-    lzo_packages += ["liblzo2-2", "hadoop-lzo-native"]
-  elif OSCheck.is_redhat_family() or OSCheck.is_suse_family():
-    lzo_packages += ["lzo", "hadoop-lzo-native"]
-  elif OSCheck.is_ubuntu_family():
-    lzo_packages += ["liblzo2-2"]
-
-  if stack_version_unformatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_unformatted):
-    if OSCheck.is_ubuntu_family():
-      lzo_packages += [script_instance.format_package_name("hadooplzo-${stack_version}") ,
-                       script_instance.format_package_name("hadooplzo-${stack_version}-native")]
-    else:
-      lzo_packages += [script_instance.format_package_name("hadooplzo_${stack_version}"),
-                       script_instance.format_package_name("hadooplzo_${stack_version}-native")]
-  else:
-    lzo_packages += ["hadoop-lzo"]
-
-  return lzo_packages

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-common/src/main/python/resource_management/libraries/functions/lzo_utils.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/lzo_utils.py b/ambari-common/src/main/python/resource_management/libraries/functions/lzo_utils.py
new file mode 100644
index 0000000..d6d987f
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/lzo_utils.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+__all__ = ["should_install_lzo", "get_lzo_packages", "install_lzo_if_needed"]
+
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import StackFeature, stack_features
+from resource_management.libraries.script.script import Script
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.expect import expect
+from resource_management.core.resources.packaging import Package
+
+INSTALLING_LZO_WITHOUT_GPL = "Cannot install LZO.  The GPL license must be explicitly enabled using 'ambari-server setup' on the Ambari host, then restart the server and try again."
+
+def get_lzo_packages():
+  lzo_packages = []
+  script_instance = Script.get_instance()
+  if OSCheck.is_suse_family() and int(OSCheck.get_os_major_version()) >= 12:
+    lzo_packages += ["liblzo2-2", "hadoop-lzo-native"]
+  elif OSCheck.is_redhat_family() or OSCheck.is_suse_family():
+    lzo_packages += ["lzo", "hadoop-lzo-native"]
+  elif OSCheck.is_ubuntu_family():
+    lzo_packages += ["liblzo2-2"]
+
+
+  stack_version_unformatted = stack_features.get_stack_feature_version(Script.get_config()) # only used to check stack_feature, NOT as package version!
+  if stack_version_unformatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_unformatted):
+    if OSCheck.is_ubuntu_family():
+      lzo_packages += [script_instance.format_package_name("hadooplzo-${stack_version}") ,
+                       script_instance.format_package_name("hadooplzo-${stack_version}-native")]
+    else:
+      lzo_packages += [script_instance.format_package_name("hadooplzo_${stack_version}"),
+                       script_instance.format_package_name("hadooplzo_${stack_version}-native")]
+  else:
+    lzo_packages += ["hadoop-lzo"]
+
+  return lzo_packages
+
+def should_install_lzo():
+  """
+  Return True if lzo is enabled via core-site.xml and the GPL license (required for lzo) has been accepted.
+  """
+  config = Script.get_config()
+  io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
+  lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+
+  if not lzo_enabled:
+    return False
+
+  is_gpl_license_accepted = default("/hostLevelParams/gpl_license_accepted", False)
+  if not is_gpl_license_accepted:
+    Logger.warning(INSTALLING_LZO_WITHOUT_GPL)
+    return False
+
+  return True
+
+def install_lzo_if_needed():
+  """
+  Install lzo package if {#should_install_lzo} is true
+  """
+  if not should_install_lzo():
+    return
+
+  lzo_packages = get_lzo_packages()
+
+  config = Script.get_config()
+  agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
+  agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
+
+  Package(lzo_packages,
+          retry_on_repo_unavailability=agent_stack_retry_on_unavailability,
+          retry_count=agent_stack_retry_count
+  )

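For service scripts, the new module collapses the old check-then-install sequence into a single idempotent call. A hedged example of how an install hook might use it (assuming a loaded agent command context):

```python
from resource_management.libraries.functions.lzo_utils import install_lzo_if_needed

def install_packages():
    # returns immediately unless io.compression.codecs enables
    # com.hadoop.compression.lzo AND the GPL license has been accepted
    install_lzo_if_needed()
```
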
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-common/src/main/python/resource_management/libraries/functions/package_conditions.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/package_conditions.py b/ambari-common/src/main/python/resource_management/libraries/functions/package_conditions.py
index 5a16061..ded63cf 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/package_conditions.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/package_conditions.py
@@ -19,8 +19,8 @@ limitations under the License.
 Ambari Agent
 
 """
-__all__ = ["is_lzo_enabled", "should_install_phoenix", "should_install_ams_collector", "should_install_ams_grafana",
-           "should_install_mysql", "should_install_mysql_connector", "should_install_ranger_tagsync"]
+__all__ = ["should_install_phoenix", "should_install_ams_collector", "should_install_ams_grafana",
+           "should_install_mysql", "should_install_ranger_tagsync"]
 
 import os
 from resource_management.libraries.script import Script
@@ -44,12 +44,6 @@ def _has_local_components(config, components, indicator_function = any):
 def _has_applicable_local_component(config, components):
   return _has_local_components(config, components, any)
 
-def should_install_lzo():
-  config = Script.get_config()
-  io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
-  lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-  return lzo_enabled
-
 def should_install_phoenix():
   phoenix_hosts = default('/clusterHostInfo/phoenix_query_server_hosts', [])
   phoenix_enabled = default('/configurations/hbase-env/phoenix_sql_enabled', False)
@@ -85,10 +79,6 @@ def should_install_mysql():
     return False
   return _has_applicable_local_component(config, "MYSQL_SERVER")
 
-def should_install_mysql_connector():
-  mysql_jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
-  return not os.path.exists(mysql_jdbc_driver_jar)
-
 def should_install_hive_atlas():
   atlas_hosts = default('/clusterHostInfo/atlas_server_hosts', [])
   has_atlas = len(atlas_hosts) > 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
index 9b7d0eb..b741a33 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
@@ -71,7 +71,8 @@ HADOOP_DIR_DEFAULTS = {
   "libexec": "/usr/lib/hadoop/libexec",
   "sbin": "/usr/lib/hadoop/sbin",
   "bin": "/usr/bin",
-  "lib": "/usr/lib/hadoop/lib"
+  "lib": "/usr/lib/hadoop/lib",
+  "conf": "/etc/hadoop/conf"
 }
 
 PACKAGE_SCOPE_INSTALL = "INSTALL"

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-common/src/main/python/resource_management/libraries/functions/tar_archive.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/tar_archive.py b/ambari-common/src/main/python/resource_management/libraries/functions/tar_archive.py
index e6d8924..7976587 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/tar_archive.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/tar_archive.py
@@ -46,11 +46,13 @@ def archive_directory_dereference(archive, directory):
     try_sleep = 1,
   )
 
-def untar_archive(archive, directory):
+def untar_archive(archive, directory, silent=True):
   """
   :param directory:   can be a symlink and is followed
   """
-  Execute(('tar','-xvf',archive,'-C',directory+"/"),
+  options = "-xf" if silent else "-xvf"
+
+  Execute(('tar',options,archive,'-C',directory+"/"),
     sudo = True,
     tries = 3,
     try_sleep = 1,

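With the new flag, extraction is quiet by default and verbose only on request. Illustrative calls (the archive and target paths are hypothetical):

```python
from resource_management.libraries.functions import tar_archive

# default: tar -xf, no per-file listing in the agent logs
tar_archive.untar_archive("/tmp/tez-native.tar.gz", "/tmp/tez-staging")

# silent=False restores the old tar -xvf behavior
tar_archive.untar_archive("/tmp/tez-native.tar.gz", "/tmp/tez-staging", silent=False)
```
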
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/tests/DeleteServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/tests/DeleteServiceTest.java b/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/tests/DeleteServiceTest.java
index 1bfc6a2..6ebf2e2 100644
--- a/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/tests/DeleteServiceTest.java
+++ b/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/tests/DeleteServiceTest.java
@@ -72,6 +72,7 @@ public class DeleteServiceTest extends ServerTestBase {
     public void testDeleteService() throws Exception {
         String clusterName = "c1";
         String serviceName = "HDFS";
+        Long serviceId = 1L;
         ConnectionParams params = new ConnectionParams();
 
         params.setServerName("localhost");
@@ -128,7 +129,7 @@ public class DeleteServiceTest extends ServerTestBase {
         List<ServiceDesiredStateEntity> serviceDesiredStateEntities = serviceDesiredStateDAO.findAll();
         assertEquals(serviceDesiredStateEntities.size(), 1);
         ServiceDesiredStateEntity serviceDesiredStateEntity = serviceDesiredStateEntities.get(0);
-        assertEquals(serviceDesiredStateEntity.getServiceName(), serviceName);
+        assertEquals(serviceDesiredStateEntity.getServiceId(), serviceId);
         assertEquals(serviceDesiredStateEntity.getDesiredState(), State.INSTALLED);
 
         /**
@@ -166,7 +167,8 @@ public class DeleteServiceTest extends ServerTestBase {
         /**
          * ClusterServiceDAO - the service entry should have been removed.
          */
-        clusterServiceEntity = clusterServiceDAO.findByClusterAndServiceNames(clusterName, serviceName);
+        //TODO : Requires logic change
+        clusterServiceEntity = clusterServiceDAO.findById(1L,1L,1L);
         assertTrue(clusterServiceEntity == null);
 
         /**
@@ -174,7 +176,8 @@ public class DeleteServiceTest extends ServerTestBase {
          */
         ServiceDesiredStateEntityPK serviceDesiredStateEntityPK = injector.getInstance(ServiceDesiredStateEntityPK.class);
         serviceDesiredStateEntityPK.setClusterId(clusterId);
-        serviceDesiredStateEntityPK.setServiceName(serviceName);
+        //TODO : Requires logic change
+        serviceDesiredStateEntityPK.setServiceId(1L);
         serviceDesiredStateEntity =  serviceDesiredStateDAO.findByPK(serviceDesiredStateEntityPK);
         assertTrue(serviceDesiredStateEntity == null);
 
@@ -187,7 +190,7 @@ public class DeleteServiceTest extends ServerTestBase {
         /**
          * HostComponentStateDAO
          */
-        hostComponentStateEntities = hostComponentStateDAO.findByService(serviceName);
+        hostComponentStateEntities = hostComponentStateDAO.findByService(1L, 1L, 1L);
         assertEquals(hostComponentStateEntities.size(), 0);
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/README.md
----------------------------------------------------------------------
diff --git a/ambari-logsearch/README.md b/ambari-logsearch/README.md
index d05f45a..2a124d1 100644
--- a/ambari-logsearch/README.md
+++ b/ambari-logsearch/README.md
@@ -4,21 +4,38 @@
 
 ### Prerequisites
 
-Install [docker] (https://docs.docker.com/)
-For Mac OS X use [Docker Machine] (https://docs.docker.com/machine/)
+- Install [docker](https://docs.docker.com/)
+- For Mac OS X use [Docker for Mac](https://docs.docker.com/docker-for-mac/)
+- [Docker compose](https://docs.docker.com/compose/) is also required.
 
 ### Build and start Log Search in docker container
 ```bash
 # to see available commands: run start-logsearch without arguments
 cd docker
-./logsearch-docker build-and-run
+./logsearch-docker build-and-run # build mvn project locally, build docker image, start containers
 ```
-If you run the script at first time, it will generate you a new Profile file inside docker directory, in that file you should set AMBARI_LOCATION (point to the local ambari root folder) and MAVEN_REPOSITORY_LOCATION (point to local maven repository location). These will be used as volumes for the docker container. If Profile is generated for first time use `./logsearch-docker start` command to start container without rebuild the project/container again.
+If you run the script for the first time, it will generate a new `Profile` file or an `.env` file inside the docker directory (run it twice if both are missing and you want both files generated). In the `.env` file you should set `MAVEN_REPOSITORY_LOCATION` to point to your local maven repository (`~/.m2` by default); these locations are used as volumes for the docker container. The `Profile` file holds the environment variables that are used inside the containers, while the `.env` file is used outside of the containers.
 
-After the logsearch container is started you can enter to it with following command:
+Then you can use the `logsearch-docker` script to start the containers (`start` command).
+Alternatively, you can use docker-compose directly to start and manage the containers.
 ```bash
+docker-compose up -d
+# or start all services in one container:
+docker-compose -f all.yml up -d
+```
+After the logsearch container has started, you can enter it with one of the following commands:
+```bash
+docker exec -it docker_logsearch_1 bash
+# or if you used all.yml for starting the logsearch docker container:
 docker exec -it logsearch bash
 ```
+If you started the containers separately and would like to access Solr locally through your external ZooKeeper container, point `solr` to `localhost` in your `/etc/hosts` file.
+
+### Run applications from IDE / maven
+
+- [Start Log Search locally](ambari-logsearch-server/README.md)
+- [Start Log Feeder locally](ambari-logsearch-logfeeder/README.md)
+
 ## Package build process
 
 

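For reference, a minimal first session with the helper script described above might look like this (a sketch assuming the compose files live under `ambari-logsearch/docker`, as the README states):

```bash
cd ambari-logsearch/docker
./logsearch-docker                  # run without arguments to list the available commands
./logsearch-docker build-and-run    # build the maven project, build the image, start the containers
docker-compose ps                   # check that the containers are up
```
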
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-logfeeder/README.md
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/README.md b/ambari-logsearch/ambari-logsearch-logfeeder/README.md
index 5a64daf..d2e55f0 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/README.md
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/README.md
@@ -22,19 +22,18 @@ limitations under the License.
 Log Feeder is a component of the Log Search service that reads logs, parses them and stores them in Apache Solr for the purpose
 of later analysis.
 
-# Compilation
+## Start locally from maven / IDE
+
+First you need to start every required service (except the Log Feeder itself); go to the `ambari-logsearch/docker` folder and run:
 ```bash
-mvn clean compile package
+docker-compose up -d zookeeper solr logsearch
 ```
-# Deploy
-## Copy to remote
-copy target/logsearch-logfeeder.tgz to host machine
 
-## Setup environment
+Secondly, if you plan to run Log Feeder from an IDE by launching the LogFeeder main method, you will need to set the working directory to `ambari/ambari-logsearch/ambari-logsearch-logfeeder` and pass `--monitor` as a command line argument.
+With Maven these steps are not needed; just run this command from the ambari-logsearch-logfeeder folder:
+
 ```bash
-mkdir /opt/logfeeder
-cd /opt/logfeeder
-tar xfz ~/logsearch-logfeeder.tar.gz 
+mvn clean package -DskipTests exec:java
 ```
 
 # Input Configuration

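Putting the steps above together, a local Log Feeder run from maven might look like the following sketch (paths as given in this README; the `--monitor` argument is wired in through the exec plugin configuration in the pom.xml change below):

```bash
cd ambari-logsearch/docker
docker-compose up -d zookeeper solr logsearch   # start the supporting services first
cd ../ambari-logsearch-logfeeder
mvn clean package -DskipTests exec:java         # builds and runs the LogFeeder main class
```
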
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml b/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml
index b1b6ece..01710bf 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml
@@ -220,7 +220,9 @@
         </executions>
         <configuration>
           <mainClass>org.apache.ambari.logfeeder.LogFeeder</mainClass>
-          <!-- <arguments> <argument></argument> </arguments> -->
+          <arguments>
+            <argument>--monitor</argument>
+          </arguments>
         </configuration>
       </plugin>
       <!-- copy-dependencies -->
@@ -238,12 +240,12 @@
 
             </goals>
             <configuration>
-              <artifactItems>*</artifactItems>
               <outputAbsoluteArtifactFilename>true</outputAbsoluteArtifactFilename>
               <outputDirectory>${basedir}/target/libs</outputDirectory>
               <overWriteReleases>false</overWriteReleases>
               <overWriteSnapshots>false</overWriteSnapshots>
               <overWriteIfNewer>true</overWriteIfNewer>
+              <includeScope>compile</includeScope>
             </configuration>
           </execution>
         </executions>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/LogFeederAMSClient.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/LogFeederAMSClient.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/LogFeederAMSClient.java
index f446446..c832358 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/LogFeederAMSClient.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/LogFeederAMSClient.java
@@ -64,6 +64,9 @@ public class LogFeederAMSClient extends AbstractTimelineMetricsSink {
 
   @Override
   public String getCollectorUri(String host) {
+    if (collectorProtocol == null || host == null || collectorPort == null || collectorPath == null) {
+      return null;
+    }
     return String.format("%s://%s:%s%s", collectorProtocol, host, collectorPort, collectorPath);
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/MetricsManager.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/MetricsManager.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/MetricsManager.java
index 1094852..6e8ac04 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/MetricsManager.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/java/org/apache/ambari/logfeeder/metrics/MetricsManager.java
@@ -45,7 +45,9 @@ public class MetricsManager {
 
   public void init() {
     LOG.info("Initializing MetricsManager()");
-    amsClient = new LogFeederAMSClient();
+    if (amsClient == null) {
+      amsClient = new LogFeederAMSClient();
+    }
 
     if (amsClient.getCollectorUri(null) != null) {
       if (LogFeederUtil.hostName == null) {
@@ -144,4 +146,8 @@ public class MetricsManager {
           (currMS - lastPublishTimeMS) / 1000 + " seconds ago, intervalConfigured=" + publishIntervalMS / 1000);
     }
   }
+
+  public void setAmsClient(LogFeederAMSClient amsClient) {
+    this.amsClient = amsClient;
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log-samples/.gitignore
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log-samples/.gitignore b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log-samples/.gitignore
new file mode 100644
index 0000000..dfb10d6
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log-samples/.gitignore
@@ -0,0 +1,4 @@
+logs/*.log
+shipper-conf/input.config-*.json
+!shipper-conf/input.config-sample.json
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log-samples/logs/service_sample.txt
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log-samples/logs/service_sample.txt b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log-samples/logs/service_sample.txt
new file mode 100644
index 0000000..21048ac
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log-samples/logs/service_sample.txt
@@ -0,0 +1,3 @@
+2016-07-13 10:45:49,640 [WARN] Sample log line 1 - warn level
+2016-07-13 10:45:49,640 [ERROR] Sample log line 2 - error level
+2016-07-13 10:45:50,351 [INFO] Sample log line 3 - info level
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log-samples/shipper-conf/global.config.json
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log-samples/shipper-conf/global.config.json b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log-samples/shipper-conf/global.config.json
new file mode 100644
index 0000000..6b8602c
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log-samples/shipper-conf/global.config.json
@@ -0,0 +1,10 @@
+{
+  "global":{
+    "add_fields":{
+      "cluster":"cl1"
+    },
+    "source":"file",
+    "tail":"true",
+    "gen_event_md5":"true"
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log-samples/shipper-conf/input.config-sample.json
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log-samples/shipper-conf/input.config-sample.json b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log-samples/shipper-conf/input.config-sample.json
new file mode 100644
index 0000000..4ab2eb2
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log-samples/shipper-conf/input.config-sample.json
@@ -0,0 +1,31 @@
+{
+  "input": [
+    {
+      "type": "service_sample",
+      "rowtype": "service",
+      "path": "target/classes/log-samples/logs/service_sample.txt"
+    }
+  ],
+  "filter": [
+    {
+      "filter": "grok",
+      "conditions": {
+        "fields": {
+          "type": [
+            "service_sample"
+          ]
+        }
+      },
+      "log4j_format": "",
+      "multiline_pattern": "^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern": "(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}\\[%{LOGLEVEL:level}\\]%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values": {
+        "logtime": {
+          "map_date": {
+            "date_pattern": "yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log-samples/shipper-conf/output.config-sample.json
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log-samples/shipper-conf/output.config-sample.json b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log-samples/shipper-conf/output.config-sample.json
new file mode 100644
index 0000000..94b44da
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log-samples/shipper-conf/output.config-sample.json
@@ -0,0 +1,34 @@
+{
+  "output": [
+    {
+      "is_enabled": "true",
+      "comment": "Output to solr for service logs",
+      "destination": "solr",
+      "zk_connect_string": "localhost:2181",
+      "type": "service",
+      "skip_logtime": "true",
+      "conditions": {
+        "fields": {
+          "rowtype": [
+            "service"
+          ]
+        }
+      }
+    },
+    {
+      "comment": "Output to solr for audit records",
+      "is_enabled": "true",
+      "destination": "solr",
+      "zk_connect_string": "localhost:2181",
+      "type": "audit",
+      "skip_logtime": "true",
+      "conditions": {
+        "fields": {
+          "rowtype": [
+            "audit"
+          ]
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log4j.xml b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log4j.xml
index 8d5174e..eb20665 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log4j.xml
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/log4j.xml
@@ -28,7 +28,7 @@
   </appender>
 
   <appender name="daily_rolling_file" class="org.apache.log4j.DailyRollingFileAppender"> 
-    <param name="file" value="logs/logsearch-logfeeder.log" /> 
+    <param name="file" value="target/logs/logsearch-logfeeder.log" />
     <param name="datePattern"  value="'.'yyyy-MM-dd" /> 
     <param name="append" value="true" /> 
     <layout class="org.apache.log4j.PatternLayout"> 
@@ -38,7 +38,7 @@
 
   <appender name="rolling_file_json"
     class="org.apache.ambari.logsearch.appender.LogsearchRollingFileAppender">
-    <param name="file" value="logs/logsearch-logfeeder.json" />
+    <param name="file" value="target/logs/logsearch-logfeeder.json" />
     <param name="append" value="true" />
     <param name="maxFileSize" value="10MB" />
     <param name="maxBackupIndex" value="10" />

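Since the appenders above now write under `target/`, a local Log Feeder run can be followed by tailing the relocated log file (path taken from the log4j.xml change above):

```bash
cd ambari-logsearch/ambari-logsearch-logfeeder
tail -f target/logs/logsearch-logfeeder.log
```
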
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/logfeeder.properties
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/logfeeder.properties b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/logfeeder.properties
index 74ea0ef..115778b 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/logfeeder.properties
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/main/resources/logfeeder.properties
@@ -13,19 +13,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-logfeeder.checkpoint.folder=
+cluster.name=cl1
+logfeeder.checkpoint.folder=target/checkpoints
 logfeeder.metrics.collector.hosts=
-
-#filter config
-logfeeder.log.filter.enable=false
+logfeeder.config.dir=target/classes/log-samples/shipper-conf/
+logfeeder.config.files=target/classes/log-samples/shipper-conf/global.config.json,\
+  target/classes/log-samples/shipper-conf/input.config-sample.json,\
+  target/classes/log-samples/shipper-conf/output.config-sample.json
+logfeeder.log.filter.enable=true
 logfeeder.solr.config.interval=5
-logfeeder.solr.zk_connect_string=
-logfeeder.solr.url=
-
-logfeeder.solr.kerberos.enable=false
-logfeeder.solr.jaas.file=/usr/lib/ambari-logsearch-logfeeder/logsearch_solr_jaas.conf
-
-#logfeeder tmp dir 
-logfeeder.tmp.dir=/tmp/$username/logfeeder/
-
 logfeeder.solr.core.config.name=history
+logfeeder.solr.zk_connect_string=localhost:2181
+logfeeder.cache.enabled=true
+logfeeder.cache.size=100
+logfeeder.cache.key.field=log_message
+logfeeder.cache.dedup.interval=1000
+logfeeder.cache.last.dedup.enabled=true
+logsearch.config.zk_connect_string=localhost:2181
+logfeeder.include.default.level=FATAL,ERROR,WARN,INFO,DEBUG,TRACE,UNKNOWN
+
+#logfeeder tmp dir
+logfeeder.tmp.dir=target/tmp

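The properties above expect ZooKeeper (and Solr registered behind it) to answer on `localhost:2181`. As a quick sanity check before starting the Log Feeder (assuming the docker containers are already up and `nc` is available), ZooKeeper's standard four-letter-word command can be used:

```bash
# should print "imok" if ZooKeeper is reachable
echo ruok | nc localhost 2181
```
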
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/metrics/MetricsManagerTest.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/metrics/MetricsManagerTest.java b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/metrics/MetricsManagerTest.java
index 24042a7..f74a80e 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/metrics/MetricsManagerTest.java
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/src/test/java/org/apache/ambari/logfeeder/metrics/MetricsManagerTest.java
@@ -50,18 +50,20 @@ public class MetricsManagerTest {
   @Before
   public void init() throws Exception {
     manager = new MetricsManager();
-    manager.init();
-    
+
     mockClient = strictMock(LogFeederAMSClient.class);
     Field f = MetricsManager.class.getDeclaredField("amsClient");
     f.setAccessible(true);
     f.set(manager, mockClient);
-    
+
+    EasyMock.expect(mockClient.getCollectorUri(null)).andReturn("null://null:null/null").anyTimes();
     capture = EasyMock.newCapture(CaptureType.FIRST);
     mockClient.emitMetrics(EasyMock.capture(capture));
     EasyMock.expectLastCall().andReturn(true).once();
-    
+
     replay(mockClient);
+    manager.setAmsClient(mockClient);
+    manager.init();
   }
   
   @Test

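To exercise just this test against the new setter-based client injection, the standard surefire single-test invocation applies:

```bash
cd ambari-logsearch/ambari-logsearch-logfeeder
mvn test -Dtest=MetricsManagerTest
```
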
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-server/.gitignore
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/.gitignore b/ambari-logsearch/ambari-logsearch-server/.gitignore
index 07e0389..7ea6a1f 100644
--- a/ambari-logsearch/ambari-logsearch-server/.gitignore
+++ b/ambari-logsearch/ambari-logsearch-server/.gitignore
@@ -6,4 +6,5 @@ target
 node_modules/
 logs/
 node/
+*.pid
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-server/README.md
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/README.md b/ambari-logsearch/ambari-logsearch-server/README.md
index 126f651..26b1f73 100644
--- a/ambari-logsearch/ambari-logsearch-server/README.md
+++ b/ambari-logsearch/ambari-logsearch-server/README.md
@@ -17,39 +17,23 @@ limitations under the License.
 {% endcomment %}
 -->
 
-#Compilation
-mvn clean compile package
+# Log Search Server
 
-#Deploy
-##Copy to remote
-copy target/logsearch-portal.tar.gz to host machine
-##Setup environment
-```bash
-mkdir /opt/logsearch
-cd /opt/logsearch
-tar xfz ~/logsearch-portal.tar.gz 
-```
-#Create Solr Collection
-*Edit for log retention days (default is 7 days)*
-```bash
-vi solr_configsets/hadoop_logs/conf/solrconfig.xml
-```
-```
-    <processor class="solr.DefaultValueUpdateProcessorFactory">
-        <str name="fieldName">_ttl_</str>
-        <str name="value">+7DAYS</str>
-    </processor>
-```
-```bash
-./create_collections.sh $SOLR_HOME $NUM_SHARDS $NUM_OF_REPLICATIONS `pwd`/solr_configsets
-```
+## Start locally from maven / IDE
+
+Other services (like zookeeper, solr, logfeeder) can be started with `docker-compose`:
 ```bash
-vi classes/logsearch.properties
-```
-```
-solr.zkhosts=$ZK1:2181,$ZK2:2181,$ZK3:2181/solr
+cd ambari/ambari-logsearch/docker
+docker-compose up -d zookeeper solr logfeeder
 ```
-*This script will stop logsearch if it is running and restart it*
+
+Then you can start the Log Search server from maven:
+
 ```bash
+cd ambari/ambari-logsearch/ambari-logsearch-server
 ./run.sh
+# or
+mvn clean spring-boot:run
 ```
+
+You can also start the Log Search server from an IDE. One thing is important: the config set location that the server tries to upload to ZooKeeper. By default config sets are located at `${LOGSEARCH_SERVER_RELATIVE_LOCATION:}src/main/configsets` in `logsearch.properties`, so depending on where you run `LogSearch.java` from, you need to set the `LOGSEARCH_SERVER_RELATIVE_LOCATION` env variable accordingly.

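As a concrete example of the env variable mentioned above: if you launch `LogSearch.java` from the repository root rather than from the module folder, the configset prefix would be set along these lines (the exact value depends on your working directory; this layout is an assumption, not taken from the README):

```bash
export LOGSEARCH_SERVER_RELATIVE_LOCATION=ambari-logsearch/ambari-logsearch-server/
```
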
http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/pom.xml b/ambari-logsearch/ambari-logsearch-server/pom.xml
index 2ad35f5..5444b00 100755
--- a/ambari-logsearch/ambari-logsearch-server/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-server/pom.xml
@@ -118,13 +118,13 @@
                   <goal>copy-dependencies</goal>
                 </goals>
                 <configuration>
-                  <artifactItems>*</artifactItems>
                   <excludeArtifactIds>ambari-logsearch-web</excludeArtifactIds>
                   <outputAbsoluteArtifactFilename>true</outputAbsoluteArtifactFilename>
                   <outputDirectory>${basedir}/target/libs</outputDirectory>
                   <overWriteReleases>false</overWriteReleases>
                   <overWriteSnapshots>false</overWriteSnapshots>
                   <overWriteIfNewer>true</overWriteIfNewer>
+                  <includeScope>compile</includeScope>
                 </configuration>
               </execution>
             </executions>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-server/run.sh
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/run.sh b/ambari-logsearch/ambari-logsearch-server/run.sh
index 765fe6c..f245930 100755
--- a/ambari-logsearch/ambari-logsearch-server/run.sh
+++ b/ambari-logsearch/ambari-logsearch-server/run.sh
@@ -17,4 +17,4 @@ echo "
 ███████╗╚██████╔╝╚██████╔╝    ███████║███████╗██║  ██║██║  ██║╚██████╗██║  ██║
 ╚══════╝ ╚═════╝  ╚═════╝     ╚══════╝╚══════╝╚═╝  ╚═╝╚═╝  ╚═╝ ╚═════╝╚═╝  ╚═╝
 "
-mvn spring-boot:run
+mvn clean spring-boot:run

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/managed-schema
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/managed-schema b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/managed-schema
index c6f498b..93b2d6b 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/managed-schema
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/managed-schema
@@ -115,6 +115,8 @@
   <dynamicField name='std_*' type="text_std_token_lower_case" multiValued="false" stored="false"/>
   <dynamicField name='key_*' type="key_lower_case" multiValued="false" stored="false"/>
   <dynamicField name="ws_*" type="text_ws" multiValued="false" omitNorms="false" stored="false"/>
+  <dynamicField name="sdi_*" type="text_ws" omitNorms="false" multiValued="false" stored="true"/>
+
   <copyField source="log_message" dest="key_log_message"/>
 
 </schema>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/conf/AuthPropsConfig.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/conf/AuthPropsConfig.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/conf/AuthPropsConfig.java
index 2bcdebc..06673b3 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/conf/AuthPropsConfig.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/conf/AuthPropsConfig.java
@@ -167,6 +167,16 @@ public class AuthPropsConfig {
   )
   private List<String> allowedRoles;
 
+  @Value("${logsearch.auth.redirect.forward:false}")
+  @LogSearchPropertyDescription(
+    name = "logsearch.auth.redirect.forward",
+    description = "Forward redirects for HTTP calls. (useful in case of proxies)",
+    examples = {"true"},
+    defaultValue = "false",
+    sources = {LOGSEARCH_PROPERTIES_FILE}
+  )
+  private boolean redirectForward;
+
   public boolean isAuthFileEnabled() {
     return authFileEnabled;
   }
@@ -278,4 +288,12 @@ public class AuthPropsConfig {
   public void setAllowedRoles(List<String> allowedRoles) {
     this.allowedRoles = allowedRoles;
   }
+
+  public boolean isRedirectForward() {
+    return redirectForward;
+  }
+
+  public void setRedirectForward(boolean redirectForward) {
+    this.redirectForward = redirectForward;
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/conf/SecurityConfig.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/conf/SecurityConfig.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/conf/SecurityConfig.java
index cb8124e..6f8d7ba 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/conf/SecurityConfig.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/conf/SecurityConfig.java
@@ -44,6 +44,7 @@ import org.springframework.security.config.annotation.web.configuration.WebSecur
 import org.springframework.security.config.http.SessionCreationPolicy;
 import org.springframework.security.web.access.intercept.FilterSecurityInterceptor;
 import org.springframework.security.web.authentication.UsernamePasswordAuthenticationFilter;
+import org.springframework.security.web.authentication.www.BasicAuthenticationFilter;
 import org.springframework.security.web.util.matcher.AntPathRequestMatcher;
 import org.springframework.security.web.util.matcher.OrRequestMatcher;
 import org.springframework.security.web.util.matcher.RequestMatcher;
@@ -105,8 +106,8 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter {
       .httpBasic()
         .authenticationEntryPoint(logsearchAuthenticationEntryPoint())
       .and()
-      .addFilterBefore(logsearchUsernamePasswordAuthenticationFilter(), UsernamePasswordAuthenticationFilter.class)
-      .addFilterBefore(logsearchKRBAuthenticationFilter(), UsernamePasswordAuthenticationFilter.class)
+      .addFilterBefore(logsearchKRBAuthenticationFilter(), BasicAuthenticationFilter.class)
+      .addFilterBefore(logsearchUsernamePasswordAuthenticationFilter(), LogsearchKRBAuthenticationFilter.class)
       .addFilterAfter(securityContextFormationFilter(), FilterSecurityInterceptor.class)
       .addFilterAfter(logsearchEventHistoryFilter(), LogsearchSecurityContextFormationFilter.class)
       .addFilterAfter(logsearchAuditLogFilter(), LogsearchSecurityContextFormationFilter.class)
@@ -153,6 +154,7 @@ public class SecurityConfig extends WebSecurityConfigurerAdapter {
   public LogsearchAuthenticationEntryPoint logsearchAuthenticationEntryPoint() {
     LogsearchAuthenticationEntryPoint entryPoint = new LogsearchAuthenticationEntryPoint("/login");
     entryPoint.setForceHttps(false);
+    entryPoint.setUseForward(authPropsConfig.isRedirectForward());
     return entryPoint;
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/solr/SolrConstants.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/solr/SolrConstants.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/solr/SolrConstants.java
index 60fc1a3..6554bcf 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/solr/SolrConstants.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/solr/SolrConstants.java
@@ -66,6 +66,7 @@ public class SolrConstants {
     public static final String STORED_TOKEN_DYNAMIC_FIELDS = "std_*";
     public static final String KEY_DYNAMIC_FIELDS = "key_*";
     public static final String WS_DYNAMIC_FIELDS = "ws_*";
+    public static final String SDI_DYNAMIC_FIELDS = "sdi_*";
   }
 
   public class AuditLogConstants {

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/solr/model/SolrServiceLogData.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/solr/model/SolrServiceLogData.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/solr/model/SolrServiceLogData.java
index c6fdba3..224ed8c 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/solr/model/SolrServiceLogData.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/solr/model/SolrServiceLogData.java
@@ -22,6 +22,7 @@ import org.apache.ambari.logsearch.model.response.ServiceLogData;
 import org.apache.solr.client.solrj.beans.Field;
 
 import java.util.Date;
+import java.util.Map;
 
 import static org.apache.ambari.logsearch.solr.SolrConstants.ServiceLogConstants.*;
 
@@ -48,6 +49,9 @@ public class SolrServiceLogData extends SolrCommonLogData implements ServiceLogD
   @Field(HOST)
   private String host;
 
+  @Field(SDI_DYNAMIC_FIELDS)
+  private Map<String, Object> sdiDynamicFields;
+
   @Override
   public String getPath() {
     return path;
@@ -117,4 +121,17 @@ public class SolrServiceLogData extends SolrCommonLogData implements ServiceLogD
   public void setLevel(String level) {
     this.level = level;
   }
+
+  public void setSdiDynamicFields(Map<String, Object> sdiDynamicFields) {
+    this.sdiDynamicFields = sdiDynamicFields;
+  }
+
+  @Override
+  public Map<String, Object> getAllDynamicFields() {
+    Map<String, Object> dynamicFieldsMap = super.getAllDynamicFields();
+    if (sdiDynamicFields != null) {
+      dynamicFieldsMap.putAll(sdiDynamicFields);
+    }
+    return dynamicFieldsMap;
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/e83bf1bd/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/web/filters/LogsearchAuthenticationEntryPoint.java
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/web/filters/LogsearchAuthenticationEntryPoint.java b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/web/filters/LogsearchAuthenticationEntryPoint.java
index 1831697..2fe5f7b 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/web/filters/LogsearchAuthenticationEntryPoint.java
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/java/org/apache/ambari/logsearch/web/filters/LogsearchAuthenticationEntryPoint.java
@@ -44,7 +44,7 @@ public class LogsearchAuthenticationEntryPoint extends LoginUrlAuthenticationEnt
       response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "Session Timeout");
     } else {
       logger.debug("Redirecting to login page :" + this.getLoginFormUrl());
-      response.sendRedirect(this.getLoginFormUrl() + ((request.getQueryString() != null) ? "?" + request.getQueryString() : ""));
+      super.commence(request, response, authException);
     }
   }
 }