Posted to commits@ambari.apache.org by ab...@apache.org on 2014/12/09 00:38:39 UTC

[2/2] ambari git commit: AMBARI-8373 - Refactor the OS-dependent Ambari Agent Windows components (Eugene Chekanskiy via abaranchuk)

AMBARI-8373 - Refactor the OS-dependent Ambari Agent Windows components (Eugene Chekanskiy via abaranchuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/15c65b93
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/15c65b93
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/15c65b93

Branch: refs/heads/trunk
Commit: 15c65b93005c7c7470d73db1cd6d53cab184e398
Parents: bca3317
Author: Artem Baranchuk <ab...@hortonworks.com>
Authored: Tue Dec 9 01:36:46 2014 +0200
Committer: Artem Baranchuk <ab...@hortonworks.com>
Committed: Tue Dec 9 01:36:46 2014 +0200

----------------------------------------------------------------------
 ambari-agent/conf/windows/service_wrapper.py    |  15 +-
 .../src/main/python/ambari_agent/ActionQueue.py |   2 -
 .../python/ambari_agent/AgentConfig_linux.py    | 230 ---------
 .../python/ambari_agent/AgentConfig_windows.py  | 232 ---------
 .../main/python/ambari_agent/AmbariConfig.py    | 181 +++++--
 .../src/main/python/ambari_agent/Controller.py  |  14 +-
 .../src/main/python/ambari_agent/Facter.py      |  36 +-
 .../src/main/python/ambari_agent/Hardware.py    |   4 +-
 .../python/ambari_agent/HeartbeatHandlers.py    | 147 ++++++
 .../ambari_agent/HeartbeatHandlers_windows.py   |  58 ---
 .../ambari_agent/HeartbeatStopHandler_linux.py  |  91 ----
 .../src/main/python/ambari_agent/HostCleanup.py |  10 +-
 .../src/main/python/ambari_agent/HostInfo.py    | 508 ++++++++++++++++++-
 .../main/python/ambari_agent/HostInfo_linux.py  | 380 --------------
 .../main/python/ambari_agent/HostInfo_win.py    | 227 ---------
 .../src/main/python/ambari_agent/NetUtil.py     |   9 +-
 .../python/ambari_agent/PackagesAnalyzer.py     |   1 -
 .../main/python/ambari_agent/PythonExecutor.py  |   4 +-
 .../src/main/python/ambari_agent/main.py        |  18 +-
 .../test/python/ambari_agent/TestActionQueue.py |   4 +-
 .../test/python/ambari_agent/TestHardware.py    |  10 +-
 .../test/python/ambari_agent/TestHeartbeat.py   |   8 +-
 .../test/python/ambari_agent/TestHostInfo.py    |  80 +--
 .../src/test/python/ambari_agent/TestMain.py    |  14 +-
 .../python/ambari_agent/TestRegistration.py     |   1 -
 .../src/main/python/ambari_commons/os_check.py  | 192 +++----
 .../python/ambari_commons/os_family_impl.py     |  64 +++
 .../src/main/python/ambari_commons/os_utils.py  |   4 +-
 .../functions/get_unique_id_and_date.py         |   2 +-
 ambari-server/src/main/python/ambari-server.py  |   2 +-
 .../python/ambari_server/dbConfiguration.py     |   4 +-
 .../python/ambari_server/serverConfiguration.py |   2 +-
 .../main/python/ambari_server/serverSetup.py    |   9 +-
 .../main/python/ambari_server/setupSecurity.py  |   4 +-
 ambari-server/src/test/python/TestOSCheck.py    |  22 -
 35 files changed, 1050 insertions(+), 1539 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/15c65b93/ambari-agent/conf/windows/service_wrapper.py
----------------------------------------------------------------------
diff --git a/ambari-agent/conf/windows/service_wrapper.py b/ambari-agent/conf/windows/service_wrapper.py
index 1d11202..7eabb56 100644
--- a/ambari-agent/conf/windows/service_wrapper.py
+++ b/ambari-agent/conf/windows/service_wrapper.py
@@ -28,11 +28,18 @@ from ambari_commons.ambari_service import AmbariService, ENV_PYTHON_PATH
 from ambari_commons.exceptions import *
 from ambari_commons.logging_utils import *
 from ambari_commons.os_windows import WinServiceController
-from ambari_agent.AmbariConfig import AmbariConfig, SETUP_ACTION, START_ACTION, DEBUG_ACTION, STOP_ACTION, STATUS_ACTION
-from ambari_agent.HeartbeatHandlers_windows import HeartbeatStopHandler
+from ambari_agent.AmbariConfig import AmbariConfig
+from ambari_agent.HeartbeatHandlers import HeartbeatStopHandlers
 
 AMBARI_VERSION_VAR = "AMBARI_VERSION_VAR"
 
+SETUP_ACTION = "setup"
+START_ACTION = "start"
+STOP_ACTION = "stop"
+RESET_ACTION = "reset"
+STATUS_ACTION = "status"
+DEBUG_ACTION = "debug"
+
 def parse_options():
   # parse env cmd
   with open(os.path.join(os.getcwd(), "ambari-env.cmd"), "r") as env_cmd:
@@ -82,7 +89,7 @@ class AmbariAgentService(AmbariService):
     # Soft dependency on the Windows Time service
     ensure_time_service_is_started()
 
-    self.heartbeat_stop_handler = HeartbeatStopHandler(self._heventSvcStop)
+    self.heartbeat_stop_handler = HeartbeatStopHandlers(self._heventSvcStop)
 
     self.ReportServiceStatus(win32service.SERVICE_RUNNING)
 
@@ -101,7 +108,7 @@ class AmbariAgentService(AmbariService):
 def ensure_time_service_is_started():
   ret = WinServiceController.EnsureServiceIsStarted("W32Time")
   if 0 != ret:
-    raise FatalException(-1, "Error starting Windows Time service: " + string(ret))
+    raise FatalException(-1, "Error starting Windows Time service: " + str(ret))
   pass
 
 

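Note on this hunk: the CLI action constants (SETUP_ACTION and friends) move out of AmbariConfig — they are deleted there in a later hunk of this commit — and into the service wrapper itself. The last change is also a genuine bug fix: string() is not a Python builtin, so the old error path raised NameError before the intended FatalException could even be constructed. A minimal reproduction of the corrected pattern (the report() helper is hypothetical, for illustration only):

    def report(ret):
      # Old code: "..." + string(ret) -> NameError: name 'string' is not defined
      # New code: "..." + str(ret)    -> the intended message
      return "Error starting Windows Time service: " + str(ret)

    print report(2)  # Error starting Windows Time service: 2
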
http://git-wip-us.apache.org/repos/asf/ambari/blob/15c65b93/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/ActionQueue.py b/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
index 4ecb822..fbde26f 100644
--- a/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
+++ b/ambari-agent/src/main/python/ambari_agent/ActionQueue.py
@@ -28,7 +28,6 @@ import json
 
 from AgentException import AgentException
 from LiveStatus import LiveStatus
-from shell import shellRunner
 from ActualConfigHandler import ActualConfigHandler
 from CommandStatusDict import CommandStatusDict
 from CustomServiceOrchestrator import CustomServiceOrchestrator
@@ -74,7 +73,6 @@ class ActionQueue(threading.Thread):
       self.status_update_callback)
     self.config = config
     self.controller = controller
-    self.sh = shellRunner()
     self.configTags = {}
     self._stop = threading.Event()
     self.tmpdir = config.get('agent', 'prefix')

http://git-wip-us.apache.org/repos/asf/ambari/blob/15c65b93/ambari-agent/src/main/python/ambari_agent/AgentConfig_linux.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/AgentConfig_linux.py b/ambari-agent/src/main/python/ambari_agent/AgentConfig_linux.py
deleted file mode 100644
index 4be52cb..0000000
--- a/ambari-agent/src/main/python/ambari_agent/AgentConfig_linux.py
+++ /dev/null
@@ -1,230 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import os
-
-content = """
-
-[server]
-hostname=localhost
-url_port=8440
-secured_url_port=8441
-
-[agent]
-prefix=/tmp/ambari-agent
-tmp_dir=/tmp/ambari-agent/tmp
-data_cleanup_interval=86400
-data_cleanup_max_age=2592000
-data_cleanup_max_size_MB = 100
-ping_port=8670
-cache_dir=/var/lib/ambari-agent/cache
-run_as_user=root
-
-[services]
-
-[python]
-custom_actions_dir = /var/lib/ambari-agent/resources/custom_actions
-
-[command]
-maxretries=2
-sleepBetweenRetries=1
-
-[security]
-keysdir=/tmp/ambari-agent
-server_crt=ca.crt
-passphrase_env_var_name=AMBARI_PASSPHRASE
-
-[heartbeat]
-state_interval = 6
-dirs=/etc/hadoop,/etc/hadoop/conf,/var/run/hadoop,/var/log/hadoop
-log_lines_count=300
-
-"""
-
-imports = [
-  "hdp/manifests/*.pp",
-  "hdp-hadoop/manifests/*.pp",
-  "hdp-hbase/manifests/*.pp",
-  "hdp-zookeeper/manifests/*.pp",
-  "hdp-oozie/manifests/*.pp",
-  "hdp-pig/manifests/*.pp",
-  "hdp-sqoop/manifests/*.pp",
-  "hdp-templeton/manifests/*.pp",
-  "hdp-hive/manifests/*.pp",
-  "hdp-hcat/manifests/*.pp",
-  "hdp-mysql/manifests/*.pp",
-  "hdp-monitor-webserver/manifests/*.pp",
-  "hdp-repos/manifests/*.pp"
-]
-
-rolesToClass = {
-  'GLUSTERFS': 'hdp-hadoop::glusterfs',
-  'GLUSTERFS_CLIENT': 'hdp-hadoop::glusterfs_client',
-  'GLUSTERFS_SERVICE_CHECK': 'hdp-hadoop::glusterfs_service_check',
-  'NAMENODE': 'hdp-hadoop::namenode',
-  'DATANODE': 'hdp-hadoop::datanode',
-  'SECONDARY_NAMENODE': 'hdp-hadoop::snamenode',
-  'JOBTRACKER': 'hdp-hadoop::jobtracker',
-  'TASKTRACKER': 'hdp-hadoop::tasktracker',
-  'RESOURCEMANAGER': 'hdp-yarn::resourcemanager',
-  'NODEMANAGER': 'hdp-yarn::nodemanager',
-  'HISTORYSERVER': 'hdp-yarn::historyserver',
-  'YARN_CLIENT': 'hdp-yarn::yarn_client',
-  'HDFS_CLIENT': 'hdp-hadoop::client',
-  'MAPREDUCE_CLIENT': 'hdp-hadoop::client',
-  'MAPREDUCE2_CLIENT': 'hdp-yarn::mapreducev2_client',
-  'ZOOKEEPER_SERVER': 'hdp-zookeeper',
-  'ZOOKEEPER_CLIENT': 'hdp-zookeeper::client',
-  'HBASE_MASTER': 'hdp-hbase::master',
-  'HBASE_REGIONSERVER': 'hdp-hbase::regionserver',
-  'HBASE_CLIENT': 'hdp-hbase::client',
-  'PIG': 'hdp-pig',
-  'SQOOP': 'hdp-sqoop',
-  'OOZIE_SERVER': 'hdp-oozie::server',
-  'OOZIE_CLIENT': 'hdp-oozie::client',
-  'HIVE_CLIENT': 'hdp-hive::client',
-  'HCAT': 'hdp-hcat',
-  'HIVE_SERVER': 'hdp-hive::server',
-  'HIVE_METASTORE': 'hdp-hive::metastore',
-  'MYSQL_SERVER': 'hdp-mysql::server',
-  'WEBHCAT_SERVER': 'hdp-templeton::server',
-  'DASHBOARD': 'hdp-dashboard',
-  'GANGLIA_SERVER': 'hdp-ganglia::server',
-  'GANGLIA_MONITOR': 'hdp-ganglia::monitor',
-  'HTTPD': 'hdp-monitor-webserver',
-  'HUE_SERVER': 'hdp-hue::server',
-  'HDFS_SERVICE_CHECK': 'hdp-hadoop::hdfs::service_check',
-  'MAPREDUCE_SERVICE_CHECK': 'hdp-hadoop::mapred::service_check',
-  'MAPREDUCE2_SERVICE_CHECK': 'hdp-yarn::mapred2::service_check',
-  'ZOOKEEPER_SERVICE_CHECK': 'hdp-zookeeper::zookeeper::service_check',
-  'ZOOKEEPER_QUORUM_SERVICE_CHECK': 'hdp-zookeeper::quorum::service_check',
-  'HBASE_SERVICE_CHECK': 'hdp-hbase::hbase::service_check',
-  'HIVE_SERVICE_CHECK': 'hdp-hive::hive::service_check',
-  'HCAT_SERVICE_CHECK': 'hdp-hcat::hcat::service_check',
-  'OOZIE_SERVICE_CHECK': 'hdp-oozie::oozie::service_check',
-  'PIG_SERVICE_CHECK': 'hdp-pig::pig::service_check',
-  'SQOOP_SERVICE_CHECK': 'hdp-sqoop::sqoop::service_check',
-  'WEBHCAT_SERVICE_CHECK': 'hdp-templeton::templeton::service_check',
-  'DASHBOARD_SERVICE_CHECK': 'hdp-dashboard::dashboard::service_check',
-  'DECOMMISSION_DATANODE': 'hdp-hadoop::hdfs::decommission',
-  'HUE_SERVICE_CHECK': 'hdp-hue::service_check',
-  'RESOURCEMANAGER_SERVICE_CHECK': 'hdp-yarn::resourcemanager::service_check',
-  'HISTORYSERVER_SERVICE_CHECK': 'hdp-yarn::historyserver::service_check',
-  'TEZ_CLIENT': 'hdp-tez::tez_client',
-  'YARN_SERVICE_CHECK': 'hdp-yarn::yarn::service_check',
-  'FLUME_SERVER': 'hdp-flume',
-  'JOURNALNODE': 'hdp-hadoop::journalnode',
-  'ZKFC': 'hdp-hadoop::zkfc'
-}
-
-serviceStates = {
-  'START': 'running',
-  'INSTALL': 'installed_and_configured',
-  'STOP': 'stopped'
-}
-
-servicesToPidNames = {
-  'GLUSTERFS' : 'glusterd.pid$',
-  'NAMENODE': 'hadoop-{USER}-namenode.pid$',
-  'SECONDARY_NAMENODE': 'hadoop-{USER}-secondarynamenode.pid$',
-  'DATANODE': 'hadoop-{USER}-datanode.pid$',
-  'JOBTRACKER': 'hadoop-{USER}-jobtracker.pid$',
-  'TASKTRACKER': 'hadoop-{USER}-tasktracker.pid$',
-  'RESOURCEMANAGER': 'yarn-{USER}-resourcemanager.pid$',
-  'NODEMANAGER': 'yarn-{USER}-nodemanager.pid$',
-  'HISTORYSERVER': 'mapred-{USER}-historyserver.pid$',
-  'JOURNALNODE': 'hadoop-{USER}-journalnode.pid$',
-  'ZKFC': 'hadoop-{USER}-zkfc.pid$',
-  'OOZIE_SERVER': 'oozie.pid',
-  'ZOOKEEPER_SERVER': 'zookeeper_server.pid',
-  'FLUME_SERVER': 'flume-node.pid',
-  'TEMPLETON_SERVER': 'templeton.pid',
-  'GANGLIA_SERVER': 'gmetad.pid',
-  'GANGLIA_MONITOR': 'gmond.pid',
-  'HBASE_MASTER': 'hbase-{USER}-master.pid',
-  'HBASE_REGIONSERVER': 'hbase-{USER}-regionserver.pid',
-  'HCATALOG_SERVER': 'webhcat.pid',
-  'KERBEROS_SERVER': 'kadmind.pid',
-  'HIVE_SERVER': 'hive-server.pid',
-  'HIVE_METASTORE': 'hive.pid',
-  'MYSQL_SERVER': 'mysqld.pid',
-  'HUE_SERVER': '/var/run/hue/supervisor.pid',
-  'WEBHCAT_SERVER': 'webhcat.pid',
-}
-
-#Each service, which's pid depends on user should provide user mapping
-servicesToLinuxUser = {
-  'NAMENODE': 'hdfs_user',
-  'SECONDARY_NAMENODE': 'hdfs_user',
-  'DATANODE': 'hdfs_user',
-  'JOURNALNODE': 'hdfs_user',
-  'ZKFC': 'hdfs_user',
-  'JOBTRACKER': 'mapred_user',
-  'TASKTRACKER': 'mapred_user',
-  'RESOURCEMANAGER': 'yarn_user',
-  'NODEMANAGER': 'yarn_user',
-  'HISTORYSERVER': 'mapred_user',
-  'HBASE_MASTER': 'hbase_user',
-  'HBASE_REGIONSERVER': 'hbase_user',
-}
-
-pidPathVars = [
-  {'var' : 'glusterfs_pid_dir_prefix',
-    'defaultValue' : '/var/run'},
-  {'var' : 'hadoop_pid_dir_prefix',
-    'defaultValue' : '/var/run/hadoop'},
-  {'var' : 'hadoop_pid_dir_prefix',
-    'defaultValue' : '/var/run/hadoop'},
-  {'var' : 'ganglia_runtime_dir',
-    'defaultValue' : '/var/run/ganglia/hdp'},
-  {'var' : 'hbase_pid_dir',
-    'defaultValue' : '/var/run/hbase'},
-  {'var' : 'zk_pid_dir',
-    'defaultValue' : '/var/run/zookeeper'},
-  {'var' : 'oozie_pid_dir',
-    'defaultValue' : '/var/run/oozie'},
-  {'var' : 'hcat_pid_dir',
-    'defaultValue' : '/var/run/webhcat'},
-  {'var' : 'hive_pid_dir',
-    'defaultValue' : '/var/run/hive'},
-  {'var' : 'mysqld_pid_dir',
-    'defaultValue' : '/var/run/mysqld'},
-  {'var' : 'hcat_pid_dir',
-    'defaultValue' : '/var/run/webhcat'},
-  {'var' : 'yarn_pid_dir_prefix',
-    'defaultValue' : '/var/run/hadoop-yarn'},
-  {'var' : 'mapred_pid_dir_prefix',
-    'defaultValue' : '/var/run/hadoop-mapreduce'},
-]
-
-if 'AMBARI_AGENT_CONF_DIR' in os.environ:
-  configFile = os.path.join(os.environ['AMBARI_AGENT_CONF_DIR'], "ambari-agent.ini")
-else:
-  configFile = "/etc/ambari-agent/conf/ambari-agent.ini"
-
-if 'AMBARI_AGENT_LOG_DIR' in os.environ:
-  logfile = os.path.join(os.environ['AMBARI_AGENT_LOG_DIR'], "ambari-agent.log")
-else:
-  logfile = "/var/log/ambari-agent/ambari-agent.log"
-
-if 'AMBARI_AGENT_OUT_DIR' in os.environ:
-  outfile = os.path.join(os.environ['AMBARI_AGENT_OUT_DIR'], "ambari-agent.out")
-else:
-  outfile = "/var/log/ambari-agent/ambari-agent.out"

http://git-wip-us.apache.org/repos/asf/ambari/blob/15c65b93/ambari-agent/src/main/python/ambari_agent/AgentConfig_windows.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/AgentConfig_windows.py b/ambari-agent/src/main/python/ambari_agent/AgentConfig_windows.py
deleted file mode 100644
index e5e1b22..0000000
--- a/ambari-agent/src/main/python/ambari_agent/AgentConfig_windows.py
+++ /dev/null
@@ -1,232 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import os
-
-content = """
-
-[server]
-hostname=localhost
-url_port=8440
-secured_url_port=8441
-
-[agent]
-prefix=\\tmp\\ambari-agent
-data_cleanup_interval=86400
-data_cleanup_max_age=2592000
-ping_port=8670
-cache_dir=\\var\\lib\\ambari-agent\\cache
-
-[services]
-
-[python]
-custom_actions_dir = \\var\\lib\\ambari-agent\\resources\\custom_actions
-
-[command]
-maxretries=2
-sleepBetweenRetries=1
-
-[security]
-keysdir=\\tmp\\ambari-agent
-server_crt=ca.crt
-passphrase_env_var_name=AMBARI_PASSPHRASE
-
-[heartbeat]
-state_interval = 6
-dirs=\\etc\\hadoop,\\etc\\hadoop\\conf,\\var\\run\\hadoop,\\var\\log\\hadoop
-rpms=glusterfs,openssl,wget,net-snmp,ntpd,ganglia,nagios,glusterfs
-log_lines_count=300
-
-"""
-
-imports = [
-  "hdp\\manifests\\*.pp",
-  "hdp-hadoop\\manifests\\*.pp",
-  "hdp-hbase\\manifests\\*.pp",
-  "hdp-zookeeper\\manifests\\*.pp",
-  "hdp-oozie\\manifests\\*.pp",
-  "hdp-pig\\manifests\\*.pp",
-  "hdp-sqoop\\manifests\\*.pp",
-  "hdp-templeton\\manifests\\*.pp",
-  "hdp-hive\\manifests\\*.pp",
-  "hdp-hcat\\manifests\\*.pp",
-  "hdp-mysql\\manifests\\*.pp",
-  "hdp-monitor-webserver\\manifests\\*.pp",
-  "hdp-repos\\manifests\\*.pp"
-]
-
-rolesToClass = {
-  'GLUSTERFS': 'hdp-hadoop::glusterfs',
-  'GLUSTERFS_CLIENT': 'hdp-hadoop::glusterfs_client',
-  'GLUSTERFS_SERVICE_CHECK': 'hdp-hadoop::glusterfs_service_check',
-  'NAMENODE': 'hdp-hadoop::namenode',
-  'DATANODE': 'hdp-hadoop::datanode',
-  'SECONDARY_NAMENODE': 'hdp-hadoop::snamenode',
-  'JOBTRACKER': 'hdp-hadoop::jobtracker',
-  'TASKTRACKER': 'hdp-hadoop::tasktracker',
-  'RESOURCEMANAGER': 'hdp-yarn::resourcemanager',
-  'NODEMANAGER': 'hdp-yarn::nodemanager',
-  'HISTORYSERVER': 'hdp-yarn::historyserver',
-  'YARN_CLIENT': 'hdp-yarn::yarn_client',
-  'HDFS_CLIENT': 'hdp-hadoop::client',
-  'MAPREDUCE_CLIENT': 'hdp-hadoop::client',
-  'MAPREDUCE2_CLIENT': 'hdp-yarn::mapreducev2_client',
-  'ZOOKEEPER_SERVER': 'hdp-zookeeper',
-  'ZOOKEEPER_CLIENT': 'hdp-zookeeper::client',
-  'HBASE_MASTER': 'hdp-hbase::master',
-  'HBASE_REGIONSERVER': 'hdp-hbase::regionserver',
-  'HBASE_CLIENT': 'hdp-hbase::client',
-  'PIG': 'hdp-pig',
-  'SQOOP': 'hdp-sqoop',
-  'OOZIE_SERVER': 'hdp-oozie::server',
-  'OOZIE_CLIENT': 'hdp-oozie::client',
-  'HIVE_CLIENT': 'hdp-hive::client',
-  'HCAT': 'hdp-hcat',
-  'HIVE_SERVER': 'hdp-hive::server',
-  'HIVE_METASTORE': 'hdp-hive::metastore',
-  'MYSQL_SERVER': 'hdp-mysql::server',
-  'WEBHCAT_SERVER': 'hdp-templeton::server',
-  'DASHBOARD': 'hdp-dashboard',
-  'NAGIOS_SERVER': 'hdp-nagios::server',
-  'GANGLIA_SERVER': 'hdp-ganglia::server',
-  'GANGLIA_MONITOR': 'hdp-ganglia::monitor',
-  'HTTPD': 'hdp-monitor-webserver',
-  'HUE_SERVER': 'hdp-hue::server',
-  'HDFS_SERVICE_CHECK': 'hdp-hadoop::hdfs::service_check',
-  'MAPREDUCE_SERVICE_CHECK': 'hdp-hadoop::mapred::service_check',
-  'MAPREDUCE2_SERVICE_CHECK': 'hdp-yarn::mapred2::service_check',
-  'ZOOKEEPER_SERVICE_CHECK': 'hdp-zookeeper::zookeeper::service_check',
-  'ZOOKEEPER_QUORUM_SERVICE_CHECK': 'hdp-zookeeper::quorum::service_check',
-  'HBASE_SERVICE_CHECK': 'hdp-hbase::hbase::service_check',
-  'HIVE_SERVICE_CHECK': 'hdp-hive::hive::service_check',
-  'HCAT_SERVICE_CHECK': 'hdp-hcat::hcat::service_check',
-  'OOZIE_SERVICE_CHECK': 'hdp-oozie::oozie::service_check',
-  'PIG_SERVICE_CHECK': 'hdp-pig::pig::service_check',
-  'SQOOP_SERVICE_CHECK': 'hdp-sqoop::sqoop::service_check',
-  'WEBHCAT_SERVICE_CHECK': 'hdp-templeton::templeton::service_check',
-  'DASHBOARD_SERVICE_CHECK': 'hdp-dashboard::dashboard::service_check',
-  'DECOMMISSION_DATANODE': 'hdp-hadoop::hdfs::decommission',
-  'HUE_SERVICE_CHECK': 'hdp-hue::service_check',
-  'RESOURCEMANAGER_SERVICE_CHECK': 'hdp-yarn::resourcemanager::service_check',
-  'HISTORYSERVER_SERVICE_CHECK': 'hdp-yarn::historyserver::service_check',
-  'TEZ_CLIENT': 'hdp-tez::tez_client',
-  'YARN_SERVICE_CHECK': 'hdp-yarn::yarn::service_check',
-  'FLUME_SERVER': 'hdp-flume',
-  'JOURNALNODE': 'hdp-hadoop::journalnode',
-  'ZKFC': 'hdp-hadoop::zkfc'
-}
-
-serviceStates = {
-  'START': 'running',
-  'INSTALL': 'installed_and_configured',
-  'STOP': 'stopped'
-}
-
-servicesToPidNames = {
-  'GLUSTERFS' : 'glusterd.pid$',
-  'NAMENODE': 'hadoop-{USER}-namenode.pid$',
-  'SECONDARY_NAMENODE': 'hadoop-{USER}-secondarynamenode.pid$',
-  'DATANODE': 'hadoop-{USER}-datanode.pid$',
-  'JOBTRACKER': 'hadoop-{USER}-jobtracker.pid$',
-  'TASKTRACKER': 'hadoop-{USER}-tasktracker.pid$',
-  'RESOURCEMANAGER': 'yarn-{USER}-resourcemanager.pid$',
-  'NODEMANAGER': 'yarn-{USER}-nodemanager.pid$',
-  'HISTORYSERVER': 'mapred-{USER}-historyserver.pid$',
-  'JOURNALNODE': 'hadoop-{USER}-journalnode.pid$',
-  'ZKFC': 'hadoop-{USER}-zkfc.pid$',
-  'OOZIE_SERVER': 'oozie.pid',
-  'ZOOKEEPER_SERVER': 'zookeeper_server.pid',
-  'FLUME_SERVER': 'flume-node.pid',
-  'TEMPLETON_SERVER': 'templeton.pid',
-  'NAGIOS_SERVER': 'nagios.pid',
-  'GANGLIA_SERVER': 'gmetad.pid',
-  'GANGLIA_MONITOR': 'gmond.pid',
-  'HBASE_MASTER': 'hbase-{USER}-master.pid',
-  'HBASE_REGIONSERVER': 'hbase-{USER}-regionserver.pid',
-  'HCATALOG_SERVER': 'webhcat.pid',
-  'KERBEROS_SERVER': 'kadmind.pid',
-  'HIVE_SERVER': 'hive-server.pid',
-  'HIVE_METASTORE': 'hive.pid',
-  'MYSQL_SERVER': 'mysqld.pid',
-  'HUE_SERVER': '\\var\\run\\hue\\supervisor.pid',
-  'WEBHCAT_SERVER': 'webhcat.pid',
-}
-
-#Each service, which's pid depends on user should provide user mapping
-servicesToLinuxUser = {
-  'NAMENODE': 'hdfs_user',
-  'SECONDARY_NAMENODE': 'hdfs_user',
-  'DATANODE': 'hdfs_user',
-  'JOURNALNODE': 'hdfs_user',
-  'ZKFC': 'hdfs_user',
-  'JOBTRACKER': 'mapred_user',
-  'TASKTRACKER': 'mapred_user',
-  'RESOURCEMANAGER': 'yarn_user',
-  'NODEMANAGER': 'yarn_user',
-  'HISTORYSERVER': 'mapred_user',
-  'HBASE_MASTER': 'hbase_user',
-  'HBASE_REGIONSERVER': 'hbase_user',
-}
-
-pidPathVars = [
-  {'var' : 'glusterfs_pid_dir_prefix',
-   'defaultValue' : '\\var\\run'},
-  {'var' : 'hadoop_pid_dir_prefix',
-   'defaultValue' : '\\var\\run\\hadoop'},
-  {'var' : 'hadoop_pid_dir_prefix',
-   'defaultValue' : '\\var\\run\\hadoop'},
-  {'var' : 'ganglia_runtime_dir',
-   'defaultValue' : '\\var\\run\\ganglia\\hdp'},
-  {'var' : 'hbase_pid_dir',
-   'defaultValue' : '\\var\\run\\hbase'},
-  {'var' : '',
-   'defaultValue' : '\\var\\run\\nagios'},
-  {'var' : 'zk_pid_dir',
-   'defaultValue' : '\\var\\run\\zookeeper'},
-  {'var' : 'oozie_pid_dir',
-   'defaultValue' : '\\var\\run\\oozie'},
-  {'var' : 'hcat_pid_dir',
-   'defaultValue' : '\\var\\run\\webhcat'},
-  {'var' : 'hive_pid_dir',
-   'defaultValue' : '\\var\\run\\hive'},
-  {'var' : 'mysqld_pid_dir',
-   'defaultValue' : '\\var\\run\\mysqld'},
-  {'var' : 'hcat_pid_dir',
-   'defaultValue' : '\\var\\run\\webhcat'},
-  {'var' : 'yarn_pid_dir_prefix',
-   'defaultValue' : '\\var\\run\\hadoop-yarn'},
-  {'var' : 'mapred_pid_dir_prefix',
-   'defaultValue' : '\\var\\run\\hadoop-mapreduce'},
-]
-
-if 'AMBARI_AGENT_CONF_DIR' in os.environ:
-  configFile = os.path.join(os.environ['AMBARI_AGENT_CONF_DIR'], "ambari-agent.ini")
-else:
-  configFile = "ambari-agent.ini"
-
-if 'AMBARI_AGENT_LOG_DIR' in os.environ:
-  logfile = os.path.join(os.environ['AMBARI_AGENT_LOG_DIR'], "ambari-agent.log")
-else:
-  logfile = "\\var\\log\\ambari-agent-1.3.0-SNAPSHOT\\ambari-agent.log"
-
-if 'AMBARI_AGENT_OUT_DIR' in os.environ:
-  outfile = os.path.join(os.environ['AMBARI_AGENT_OUT_DIR'], "ambari-agent.out")
-else:
-  outfile = "\\var\\log\\ambari-agent-1.3.0-SNAPSHOT\\ambari-agent.out"

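With both AgentConfig_linux.py and AgentConfig_windows.py deleted, the duplicated default-config templates are folded into a single template in AmbariConfig.py (next hunk), parameterized on the path separator via str.format. A minimal sketch of that technique, with the sections abridged:

    import os

    content = """
    [agent]
    prefix={ps}tmp{ps}ambari-agent
    cache_dir={ps}var{ps}lib{ps}ambari-agent{ps}cache
    """.format(ps=os.sep)

    # os.sep is '/' on Linux and '\\' on Windows, so the one template
    # yields /tmp/ambari-agent or \tmp\ambari-agent respectively.
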
http://git-wip-us.apache.org/repos/asf/ambari/blob/15c65b93/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py b/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
index 4bae50b..8975729 100644
--- a/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
+++ b/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
@@ -18,35 +18,128 @@ See the License for the specific language governing permissions and
 limitations under the License.
 '''
 
-import platform
-
 import ConfigParser
 import StringIO
 import json
 from NetUtil import NetUtil
+import os
+content = """
+
+[server]
+hostname=localhost
+url_port=8440
+secured_url_port=8441
+
+[agent]
+prefix={ps}tmp{ps}ambari-agent
+tmp_dir={ps}tmp{ps}ambari-agent{ps}tmp
+data_cleanup_interval=86400
+data_cleanup_max_age=2592000
+data_cleanup_max_size_MB = 100
+ping_port=8670
+cache_dir={ps}var{ps}lib{ps}ambari-agent{ps}cache
+
+[services]
+
+[python]
+custom_actions_dir = {ps}var{ps}lib{ps}ambari-agent{ps}resources{ps}custom_actions
+
+[command]
+maxretries=2
+sleepBetweenRetries=1
+
+[security]
+keysdir={ps}tmp{ps}ambari-agent
+server_crt=ca.crt
+passphrase_env_var_name=AMBARI_PASSPHRASE
+
+[heartbeat]
+state_interval = 6
+dirs={ps}etc{ps}hadoop,{ps}etc{ps}hadoop{ps}conf,{ps}var{ps}run{ps}hadoop,{ps}var{ps}log{ps}hadoop
+log_lines_count=300
+
+""".format(ps=os.sep)
+
+
+servicesToPidNames = {
+  'GLUSTERFS' : 'glusterd.pid$',
+  'NAMENODE': 'hadoop-{USER}-namenode.pid$',
+  'SECONDARY_NAMENODE': 'hadoop-{USER}-secondarynamenode.pid$',
+  'DATANODE': 'hadoop-{USER}-datanode.pid$',
+  'JOBTRACKER': 'hadoop-{USER}-jobtracker.pid$',
+  'TASKTRACKER': 'hadoop-{USER}-tasktracker.pid$',
+  'RESOURCEMANAGER': 'yarn-{USER}-resourcemanager.pid$',
+  'NODEMANAGER': 'yarn-{USER}-nodemanager.pid$',
+  'HISTORYSERVER': 'mapred-{USER}-historyserver.pid$',
+  'JOURNALNODE': 'hadoop-{USER}-journalnode.pid$',
+  'ZKFC': 'hadoop-{USER}-zkfc.pid$',
+  'OOZIE_SERVER': 'oozie.pid',
+  'ZOOKEEPER_SERVER': 'zookeeper_server.pid',
+  'FLUME_SERVER': 'flume-node.pid',
+  'TEMPLETON_SERVER': 'templeton.pid',
+  'GANGLIA_SERVER': 'gmetad.pid',
+  'GANGLIA_MONITOR': 'gmond.pid',
+  'HBASE_MASTER': 'hbase-{USER}-master.pid',
+  'HBASE_REGIONSERVER': 'hbase-{USER}-regionserver.pid',
+  'HCATALOG_SERVER': 'webhcat.pid',
+  'KERBEROS_SERVER': 'kadmind.pid',
+  'HIVE_SERVER': 'hive-server.pid',
+  'HIVE_METASTORE': 'hive.pid',
+  'MYSQL_SERVER': 'mysqld.pid',
+  'HUE_SERVER': '/var/run/hue/supervisor.pid',
+  'WEBHCAT_SERVER': 'webhcat.pid',
+}
+
+#Each service whose pid depends on a user should provide a user mapping
+servicesToLinuxUser = {
+  'NAMENODE': 'hdfs_user',
+  'SECONDARY_NAMENODE': 'hdfs_user',
+  'DATANODE': 'hdfs_user',
+  'JOURNALNODE': 'hdfs_user',
+  'ZKFC': 'hdfs_user',
+  'JOBTRACKER': 'mapred_user',
+  'TASKTRACKER': 'mapred_user',
+  'RESOURCEMANAGER': 'yarn_user',
+  'NODEMANAGER': 'yarn_user',
+  'HISTORYSERVER': 'mapred_user',
+  'HBASE_MASTER': 'hbase_user',
+  'HBASE_REGIONSERVER': 'hbase_user',
+}
+
+pidPathVars = [
+  {'var' : 'glusterfs_pid_dir_prefix',
+   'defaultValue' : '/var/run'},
+  {'var' : 'hadoop_pid_dir_prefix',
+   'defaultValue' : '/var/run/hadoop'},
+  {'var' : 'hadoop_pid_dir_prefix',
+   'defaultValue' : '/var/run/hadoop'},
+  {'var' : 'ganglia_runtime_dir',
+   'defaultValue' : '/var/run/ganglia/hdp'},
+  {'var' : 'hbase_pid_dir',
+   'defaultValue' : '/var/run/hbase'},
+  {'var' : 'zk_pid_dir',
+   'defaultValue' : '/var/run/zookeeper'},
+  {'var' : 'oozie_pid_dir',
+   'defaultValue' : '/var/run/oozie'},
+  {'var' : 'hcat_pid_dir',
+   'defaultValue' : '/var/run/webhcat'},
+  {'var' : 'hive_pid_dir',
+   'defaultValue' : '/var/run/hive'},
+  {'var' : 'mysqld_pid_dir',
+   'defaultValue' : '/var/run/mysqld'},
+  {'var' : 'hcat_pid_dir',
+   'defaultValue' : '/var/run/webhcat'},
+  {'var' : 'yarn_pid_dir_prefix',
+   'defaultValue' : '/var/run/hadoop-yarn'},
+  {'var' : 'mapred_pid_dir_prefix',
+   'defaultValue' : '/var/run/hadoop-mapreduce'},
+]
 
-SETUP_ACTION = "setup"
-START_ACTION = "start"
-STOP_ACTION = "stop"
-RESET_ACTION = "reset"
-STATUS_ACTION = "status"
-DEBUG_ACTION = "debug"
-
-IS_WINDOWS = platform.system() == "Windows"
-
-if not IS_WINDOWS:
-  from AgentConfig_linux import *
-else:
-  from AgentConfig_windows import *
 
-config = ConfigParser.RawConfigParser()
 
-s = StringIO.StringIO(content)
-config.readfp(s)
 
 class AmbariConfig:
   TWO_WAY_SSL_PROPERTY = "security.server.two_way_ssl"
-  CONFIG_FILE = "/etc/ambari-agent/conf/ambari-agent.ini"
   SERVER_CONNECTION_INFO = "{0}/connection_info"
   CONNECTION_PROTOCOL = "https"
 
@@ -73,46 +166,32 @@ class AmbariConfig:
   def add_section(self, section):
     self.config.add_section(section)
 
-  @staticmethod
-  def getConfigFile():
-    global configFile
-    return configFile
-
-  @staticmethod
-  def getLogFile():
-    global logfile
-    return logfile
-
-  @staticmethod
-  def getOutFile():
-    global outfile
-    return outfile
-
   def setConfig(self, customConfig):
     self.config = customConfig
 
   def getConfig(self):
     return self.config
 
-  def getImports(self):
-    global imports
-    return imports
-
-  def getRolesToClass(self):
-    global rolesToClass
-    return rolesToClass
-
-  def getServiceStates(self):
-    global serviceStates
-    return serviceStates
+  @staticmethod
+  def getConfigFile():
+    if 'AMBARI_AGENT_CONF_DIR' in os.environ:
+      return os.path.join(os.environ['AMBARI_AGENT_CONF_DIR'], "ambari-agent.ini")
+    else:
+      return os.path.join(os.sep, "etc", "ambari-agent", "conf", "ambari-agent.ini")
 
-  def getServicesToPidNames(self):
-    global servicesToPidNames
-    return servicesToPidNames
+  @staticmethod
+  def getLogFile():
+    if 'AMBARI_AGENT_LOG_DIR' in os.environ:
+      return os.path.join(os.environ['AMBARI_AGENT_LOG_DIR'], "ambari-agent.log")
+    else:
+      return os.path.join(os.sep, "var", "log", "ambari-agent", "ambari-agent.log")
 
-  def pidPathVars(self):
-    global pidPathVars
-    return pidPathVars
+  @staticmethod
+  def getOutFile():
+    if 'AMBARI_AGENT_OUT_DIR' in os.environ:
+      return os.path.join(os.environ['AMBARI_AGENT_OUT_DIR'], "ambari-agent.out")
+    else:
+      return os.path.join(os.sep, "var", "log", "ambari-agent", "ambari-agent.out")
 
   def has_option(self, section, option):
     return self.config.has_option(section, option)

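The per-OS configFile/logfile/outfile module globals are gone; the three static methods above implement one lookup order on every platform: an environment-variable override first, then an OS-portable default assembled from os.sep components. Restated standalone:

    import os

    def get_config_file():
      # AMBARI_AGENT_CONF_DIR, when set, overrides the default location.
      if 'AMBARI_AGENT_CONF_DIR' in os.environ:
        return os.path.join(os.environ['AMBARI_AGENT_CONF_DIR'], "ambari-agent.ini")
      # os.path.join(os.sep, ...) gives /etc/... on Linux, \etc\... on Windows.
      return os.path.join(os.sep, "etc", "ambari-agent", "conf", "ambari-agent.ini")
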
http://git-wip-us.apache.org/repos/asf/ambari/blob/15c65b93/ambari-agent/src/main/python/ambari_agent/Controller.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/Controller.py b/ambari-agent/src/main/python/ambari_agent/Controller.py
index d985b91..b80f08e 100644
--- a/ambari-agent/src/main/python/ambari_agent/Controller.py
+++ b/ambari-agent/src/main/python/ambari_agent/Controller.py
@@ -42,25 +42,19 @@ from FileCache import FileCache
 from NetUtil import NetUtil
 from LiveStatus import LiveStatus
 from AlertSchedulerHandler import AlertSchedulerHandler
+from HeartbeatHandlers import HeartbeatStopHandlers, bind_signal_handlers
 
 logger = logging.getLogger()
 
 AGENT_AUTO_RESTART_EXIT_CODE = 77
 
-IS_WINDOWS = platform.system() == "Windows"
-
 class Controller(threading.Thread):
 
   def __init__(self, config, heartbeat_stop_callback = None, range=30):
     threading.Thread.__init__(self)
     logger.debug('Initializing Controller RPC thread.')
-
     if heartbeat_stop_callback is None:
-      if IS_WINDOWS:
-        from HeartbeatHandlers_windows import HeartbeatStopHandler
-      else:
-        from HeartbeatStopHandler_linux import HeartbeatStopHandler
-      heartbeat_stop_callback = HeartbeatStopHandler()
+      heartbeat_stop_callback = HeartbeatStopHandlers()
 
     self.lock = threading.Lock()
     self.safeMode = True
@@ -419,10 +413,6 @@ class Controller(threading.Thread):
 
 def main(argv=None):
   # Allow Ctrl-C
-  if IS_WINDOWS:
-    from HeartbeatHandlers_windows import bind_signal_handlers
-  else:
-    from HeartbeatStopHandler_linux import bind_signal_handlers
 
   logger.setLevel(logging.INFO)
   formatter = logging.Formatter("%(asctime)s %(filename)s:%(lineno)d - \

http://git-wip-us.apache.org/repos/asf/ambari/blob/15c65b93/ambari-agent/src/main/python/ambari_agent/Facter.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/Facter.py b/ambari-agent/src/main/python/ambari_agent/Facter.py
index aabc77d..41a84af 100644
--- a/ambari-agent/src/main/python/ambari_agent/Facter.py
+++ b/ambari-agent/src/main/python/ambari_agent/Facter.py
@@ -30,7 +30,8 @@ import subprocess
 from shell import shellRunner
 import time
 import uuid
-from ambari_commons import OSCheck
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
 
 log = logging.getLogger()
 
@@ -47,7 +48,7 @@ def run_os_command(cmd):
   return process.returncode, stdoutdata, stderrdata
 
 
-class FacterBase():
+class Facter(object):
   def __init__(self):
     pass
 
@@ -180,8 +181,8 @@ class FacterBase():
   def convertSizeMbToGb(size):
     return "%0.2f GB" % round(float(size) / (1024.0), 2)
 
-
-class FacterWindows(FacterBase):
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class FacterWindows(Facter):
   GET_SYSTEM_INFO_CMD = "systeminfo"
   GET_MEMORY_CMD = '$mem =(Get-WMIObject Win32_OperatingSystem -ComputerName "LocalHost" ); echo "$($mem.FreePhysicalMemory) $($mem.TotalVisibleMemorySize)"'
   GET_PAGE_FILE_INFO = '$pgo=(Get-WmiObject Win32_PageFileUsage); echo "$($pgo.AllocatedBaseSize) $($pgo.AllocatedBaseSize-$pgo.CurrentUsage)"'
@@ -274,13 +275,14 @@ class FacterWindows(FacterBase):
     return 0
 
   def facterInfo(self):
-    facterInfo = FacterBase.facterInfo(self)
-    facterInfo['swapsize'] = FacterBase.convertSizeMbToGb(self.getSwapSize())
-    facterInfo['swapfree'] = FacterBase.convertSizeMbToGb(self.getSwapFree())
+    facterInfo = super(FacterWindows, self).facterInfo()
+    facterInfo['swapsize'] = Facter.convertSizeMbToGb(self.getSwapSize())
+    facterInfo['swapfree'] = Facter.convertSizeMbToGb(self.getSwapFree())
     return facterInfo
 
 
-class FacterLinux(FacterBase):
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class FacterLinux(Facter):
   # selinux command
   GET_SE_LINUX_ST_CMD = "/usr/sbin/sestatus"
   GET_IFCONFIG_CMD = "ifconfig"
@@ -289,9 +291,9 @@ class FacterLinux(FacterBase):
 
   def __init__(self):
 
-    self.DATA_IFCONFIG_OUTPUT = Facter.setDataIfConfigOutput()
-    self.DATA_UPTIME_OUTPUT = Facter.setDataUpTimeOutput()
-    self.DATA_MEMINFO_OUTPUT = Facter.setMemInfoOutput()
+    self.DATA_IFCONFIG_OUTPUT = FacterLinux.setDataIfConfigOutput()
+    self.DATA_UPTIME_OUTPUT = FacterLinux.setDataUpTimeOutput()
+    self.DATA_MEMINFO_OUTPUT = FacterLinux.setMemInfoOutput()
 
   @staticmethod
   def setDataIfConfigOutput():
@@ -442,19 +444,13 @@ class FacterLinux(FacterBase):
       return 0
 
   def facterInfo(self):
-    facterInfo = FacterBase.facterInfo(self)
+    facterInfo = super(FacterLinux, self).facterInfo()
     facterInfo['selinux'] = self.isSeLinux()
-    facterInfo['swapsize'] = FacterBase.convertSizeKbToGb(self.getSwapSize())
-    facterInfo['swapfree'] = FacterBase.convertSizeKbToGb(self.getSwapFree())
+    facterInfo['swapsize'] = Facter.convertSizeKbToGb(self.getSwapSize())
+    facterInfo['swapfree'] = Facter.convertSizeKbToGb(self.getSwapFree())
     return facterInfo
 
 
-if platform.system() == "Windows":
-  Facter = FacterWindows
-else:
-  Facter = FacterLinux
-
-
 def main(argv=None):
   print Facter().facterInfo()
 

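The module-level platform.system() switch that used to alias Facter to FacterWindows or FacterLinux is replaced by the @OsFamilyImpl class decorator from ambari_commons.os_family_impl (added by this commit; its body is not shown in this mail). Conceptually the decorator registers each subclass as the implementation of its base class for a given OS family, so Facter resolves to the right variant at runtime. The following is a standalone approximation under that assumption — not the actual ambari_commons code, and WINSRV_FAMILY is a stand-in for OSConst.WINSRV_FAMILY:

    class OsFamilyImpl(object):
      DEFAULT = "default"
      _registry = {}  # base class -> {os_family: implementation}

      def __init__(self, os_family):
        self.os_family = os_family

      def __call__(self, cls):
        base = cls.__mro__[1]  # the class being specialized, e.g. Facter
        OsFamilyImpl._registry.setdefault(base, {})[self.os_family] = cls
        return cls

      @classmethod
      def resolve(cls, base, os_family):
        impls = cls._registry.get(base, {})
        return impls.get(os_family, impls.get(cls.DEFAULT, base))

    WINSRV_FAMILY = "winsrv"  # hypothetical value, mirrors OSConst.WINSRV_FAMILY

    class Facter(object):
      pass

    @OsFamilyImpl(os_family=WINSRV_FAMILY)
    class FacterWindows(Facter):
      pass

    @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
    class FacterLinux(Facter):
      pass

    assert OsFamilyImpl.resolve(Facter, "redhat") is FacterLinux
    assert OsFamilyImpl.resolve(Facter, WINSRV_FAMILY) is FacterWindows
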
http://git-wip-us.apache.org/repos/asf/ambari/blob/15c65b93/ambari-agent/src/main/python/ambari_agent/Hardware.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/Hardware.py b/ambari-agent/src/main/python/ambari_agent/Hardware.py
index 8b93355..4f6e756 100644
--- a/ambari-agent/src/main/python/ambari_agent/Hardware.py
+++ b/ambari-agent/src/main/python/ambari_agent/Hardware.py
@@ -24,7 +24,7 @@ import subprocess
 import platform
 from shell import shellRunner
 from Facter import Facter
-
+from ambari_commons.os_check import OSConst, OSCheck
 logger = logging.getLogger()
 
 class Hardware:
@@ -62,7 +62,7 @@ class Hardware:
 
   @staticmethod
   def osdisks():
-    if platform.system() == "Windows":
+    if OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
       return Hardware._osdisks_win()
     else:
       return Hardware._osdisks_linux()

http://git-wip-us.apache.org/repos/asf/ambari/blob/15c65b93/ambari-agent/src/main/python/ambari_agent/HeartbeatHandlers.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/HeartbeatHandlers.py b/ambari-agent/src/main/python/ambari_agent/HeartbeatHandlers.py
new file mode 100644
index 0000000..8d4984b
--- /dev/null
+++ b/ambari-agent/src/main/python/ambari_agent/HeartbeatHandlers.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from ambari_commons.exceptions import FatalException
+from ambari_commons.os_check import OSConst, OSCheck
+import os
+import logging
+import signal
+import threading
+import traceback
+from ambari_commons.os_family_impl import OsFamilyImpl
+logger = logging.getLogger()
+
+_handler = None
+
+class HeartbeatStopHandlers(object):pass
+
+# windows impl
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HeartbeatStopHandlersWindows(HeartbeatStopHandlers):
+  def __init__(self, stopEvent=None):
+    import win32event
+    # Event is used for synchronizing heartbeat iterations (to make possible
+    # manual wait() interruption between heartbeats )
+    self._heventHeartbeat = win32event.CreateEvent(None, 0, 0, None)
+
+    # Event is used to stop the Agent process
+    if stopEvent is None:
+      # Allow standalone testing
+      self._heventStop = win32event.CreateEvent(None, 0, 0, None)
+    else:
+      # Allow one unique event per process
+      self._heventStop = stopEvent
+
+  def set_heartbeat(self):
+    import win32event
+
+    win32event.SetEvent(self._heventHeartbeat)
+
+  def reset_heartbeat(self):
+    import win32event
+
+    win32event.ResetEvent(self._heventHeartbeat)
+
+  def wait(self, timeout1, timeout2=0):
+    import win32event
+
+    timeout = int(timeout1 + timeout2) * 1000
+
+    result = win32event.WaitForMultipleObjects([self._heventStop, self._heventHeartbeat], False, timeout)
+    if (
+          win32event.WAIT_OBJECT_0 != result and win32event.WAIT_OBJECT_0 + 1 != result and win32event.WAIT_TIMEOUT != result):
+      raise FatalException(-1, "Error waiting for stop/heartbeat events: " + str(result))
+    if (win32event.WAIT_TIMEOUT == result):
+      return -1
+    return result
+
+# linux impl
+
+def signal_handler(signum, frame):
+  global _handler
+  _handler.set_stop()
+
+
+def debug(sig, frame):
+  """Interrupt running process, and provide a python prompt for
+  interactive debugging."""
+  d = {'_frame': frame}  # Allow access to frame object.
+  d.update(frame.f_globals)  # Unless shadowed by global
+  d.update(frame.f_locals)
+
+  message = "Signal received : entering python shell.\nTraceback:\n"
+  message += ''.join(traceback.format_stack(frame))
+  logger.info(message)
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HeartbeatStopHandlersLinux(HeartbeatStopHandlers):
+  def __init__(self, stopEvent=None):
+    # Event is used for synchronizing heartbeat iterations (to make possible
+    # manual wait() interruption between heartbeats )
+    self.heartbeat_wait_event = threading.Event()
+
+    # Event is used to stop the Agent process
+    if stopEvent is None:
+      # Allow standalone testing
+      self.stop_event = threading.Event()
+    else:
+      # Allow one unique event per process
+      self.stop_event = stopEvent
+
+  def set_heartbeat(self):
+    self.heartbeat_wait_event.set()
+
+  def reset_heartbeat(self):
+    self.heartbeat_wait_event.clear()
+
+  def set_stop(self):
+    self.stop_event.set()
+
+  def wait(self, timeout1, timeout2=0):
+    if self.heartbeat_wait_event.wait(timeout=timeout1):
+      # Event signaled, exit
+      return 0
+    # Stop loop when stop event received
+    # Otherwise sleep a bit more to allow STATUS_COMMAND results to be collected
+    # and sent in one heartbeat. Also avoid server overload with heartbeats
+    if self.stop_event.wait(timeout=timeout2):
+      logger.info("Stop event received")
+      return 1
+    # Timeout
+    return -1
+
+
+
+
+def bind_signal_handlers(agentPid):
+  global _handler
+  if OSCheck.get_os_family() != OSConst.WINSRV_FAMILY:
+    if os.getpid() == agentPid:
+      signal.signal(signal.SIGINT, signal_handler)
+      signal.signal(signal.SIGTERM, signal_handler)
+      signal.signal(signal.SIGUSR1, debug)
+    _handler = HeartbeatStopHandlersLinux()
+  else:
+    _handler = HeartbeatStopHandlersWindows()
+  return _handler
+
+

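The Linux wait() above returns 0 when a heartbeat wake-up was signaled, 1 when stop was requested, and -1 on timeout; the Windows variant returns the win32event wait index instead (its event list is [stop, heartbeat]), with -1 likewise meaning timeout. The Linux variant can be exercised standalone with nothing but threading (a trimmed copy of the class above):

    import threading

    class HeartbeatStopHandlersLinux(object):
      def __init__(self):
        self.heartbeat_wait_event = threading.Event()
        self.stop_event = threading.Event()

      def wait(self, timeout1, timeout2=0):
        if self.heartbeat_wait_event.wait(timeout=timeout1):
          return 0   # heartbeat requested
        if self.stop_event.wait(timeout=timeout2):
          return 1   # stop requested
        return -1    # plain timeout

    h = HeartbeatStopHandlersLinux()
    assert h.wait(0.1, 0.1) == -1  # nothing signaled -> timeout
    h.stop_event.set()
    assert h.wait(0.1, 0.1) == 1   # stop event -> 1
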
http://git-wip-us.apache.org/repos/asf/ambari/blob/15c65b93/ambari-agent/src/main/python/ambari_agent/HeartbeatHandlers_windows.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/HeartbeatHandlers_windows.py b/ambari-agent/src/main/python/ambari_agent/HeartbeatHandlers_windows.py
deleted file mode 100644
index 1b6c7de..0000000
--- a/ambari-agent/src/main/python/ambari_agent/HeartbeatHandlers_windows.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import win32event
-
-from ambari_commons.exceptions import FatalException
-
-
-def bind_signal_handlers(agentPid):
-  return HeartbeatStopHandler()
-
-
-class HeartbeatStopHandler:
-  def __init__(self, stopEvent = None):
-    # Event is used for synchronizing heartbeat iterations (to make possible
-    # manual wait() interruption between heartbeats )
-    self._heventHeartbeat = win32event.CreateEvent(None, 0, 0, None)
-
-    # Event is used to stop the Agent process
-    if stopEvent is None:
-      #Allow standalone testing
-      self._heventStop = win32event.CreateEvent(None, 0, 0, None)
-    else:
-      #Allow one unique event per process
-      self._heventStop = stopEvent
-
-  def set_heartbeat(self):
-    win32event.SetEvent(self._heventHeartbeat)
-
-  def reset_heartbeat(self):
-    win32event.ResetEvent(self._heventHeartbeat)
-
-  def wait(self, timeout1, timeout2 = 0):
-    timeout = int(timeout1 + timeout2) * 1000
-
-    result = win32event.WaitForMultipleObjects([self._heventStop, self._heventHeartbeat], False, timeout)
-    if(win32event.WAIT_OBJECT_0 != result and win32event.WAIT_OBJECT_0 + 1 != result and win32event.WAIT_TIMEOUT != result):
-      raise FatalException(-1, "Error waiting for stop/heartbeat events: " + string(result))
-    if(win32event.WAIT_TIMEOUT == result):
-      return -1
-    return result - win32event.WAIT_OBJECT_0

http://git-wip-us.apache.org/repos/asf/ambari/blob/15c65b93/ambari-agent/src/main/python/ambari_agent/HeartbeatStopHandler_linux.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/HeartbeatStopHandler_linux.py b/ambari-agent/src/main/python/ambari_agent/HeartbeatStopHandler_linux.py
deleted file mode 100644
index 2ef8c7f..0000000
--- a/ambari-agent/src/main/python/ambari_agent/HeartbeatStopHandler_linux.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import os
-import logging
-import signal
-import threading
-import traceback
-
-
-logger = logging.getLogger()
-
-_handler = None
-
-def signal_handler(signum, frame):
-  _handler.set_stop()
-
-def bind_signal_handlers(agentPid):
-  if os.getpid() == agentPid:
-    signal.signal(signal.SIGINT, signal_handler)
-    signal.signal(signal.SIGTERM, signal_handler)
-    signal.signal(signal.SIGUSR1, debug)
-
-  global _handler
-  _handler = HeartbeatStopHandler()
-
-  return _handler
-
-def debug(sig, frame):
-  """Interrupt running process, and provide a python prompt for
-  interactive debugging."""
-  d={'_frame':frame}         # Allow access to frame object.
-  d.update(frame.f_globals)  # Unless shadowed by global
-  d.update(frame.f_locals)
-
-  message  = "Signal received : entering python shell.\nTraceback:\n"
-  message += ''.join(traceback.format_stack(frame))
-  logger.info(message)
-
-class HeartbeatStopHandler:
-  def __init__(self, stopEvent = None):
-    # Event is used for synchronizing heartbeat iterations (to make possible
-    # manual wait() interruption between heartbeats )
-    self.heartbeat_wait_event = threading.Event()
-
-    # Event is used to stop the Agent process
-    if stopEvent is None:
-      #Allow standalone testing
-      self.stop_event = threading.Event()
-    else:
-      #Allow one unique event per process
-      self.stop_event = stopEvent
-
-  def set_heartbeat(self):
-    self.heartbeat_wait_event.set()
-
-  def reset_heartbeat(self):
-    self.heartbeat_wait_event.clear()
-
-  def set_stop(self):
-    self.stop_event.set()
-
-  def wait(self, timeout1, timeout2 = 0):
-    if self.heartbeat_wait_event.wait(timeout = timeout1):
-      #Event signaled, exit
-      return 0
-    # Stop loop when stop event received
-    # Otherwise sleep a bit more to allow STATUS_COMMAND results to be collected
-    # and sent in one heartbeat. Also avoid server overload with heartbeats
-    if self.stop_event.wait(timeout = timeout2):
-      logger.info("Stop event received")
-      return 1
-    #Timeout
-    return -1

http://git-wip-us.apache.org/repos/asf/ambari/blob/15c65b93/ambari-agent/src/main/python/ambari_agent/HostCleanup.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/HostCleanup.py b/ambari-agent/src/main/python/ambari_agent/HostCleanup.py
index 570c320..35e7b51 100644
--- a/ambari-agent/src/main/python/ambari_agent/HostCleanup.py
+++ b/ambari-agent/src/main/python/ambari_agent/HostCleanup.py
@@ -33,8 +33,10 @@ import optparse
 import shlex
 import datetime
 from AmbariConfig import AmbariConfig
-from pwd import getpwnam
-from ambari_commons import OSCheck
+from ambari_commons import OSCheck, OSConst
+if OSCheck.get_os_family() != OSConst.WINSRV_FAMILY:
+  from pwd import getpwnam
+
 
 logger = logging.getLogger()
 
@@ -90,8 +92,8 @@ class HostCleanup:
   def resolve_ambari_config(self):
     try:
       config = AmbariConfig()
-      if os.path.exists(AmbariConfig.CONFIG_FILE):
-        config.read(AmbariConfig.CONFIG_FILE)
+      if os.path.exists(AmbariConfig.getConfigFile()):
+        config.read(AmbariConfig.getConfigFile())
       else:
         raise Exception("No config found, use default")
 

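The pwd module exists only on POSIX, which is why its import is now gated on the OS family; code paths that need getpwnam are expected to run only where the import succeeded. The same guard using just the standard library (platform.system() standing in for OSCheck.get_os_family(), lookup_uid a hypothetical caller):

    import platform

    if platform.system() != "Windows":
      from pwd import getpwnam  # POSIX-only module

    def lookup_uid(user):
      # Valid on POSIX only; on Windows getpwnam was never imported.
      return getpwnam(user).pw_uid
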
http://git-wip-us.apache.org/repos/asf/ambari/blob/15c65b93/ambari-agent/src/main/python/ambari_agent/HostInfo.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/HostInfo.py b/ambari-agent/src/main/python/ambari_agent/HostInfo.py
index 7a1e50a..0b80ffe 100644
--- a/ambari-agent/src/main/python/ambari_agent/HostInfo.py
+++ b/ambari-agent/src/main/python/ambari_agent/HostInfo.py
@@ -18,11 +18,507 @@ See the License for the specific language governing permissions and
 limitations under the License.
 '''
 
+import os
+import glob
+import logging
+import re
+import time
+import subprocess
+import threading
+import shlex
 import platform
+import hostname
+from PackagesAnalyzer import PackagesAnalyzer
+from HostCheckReportFileHandler import HostCheckReportFileHandler
+from Hardware import Hardware
+from ambari_commons import OSCheck, OSConst, Firewall
+import socket
+from ambari_commons.os_family_impl import OsFamilyImpl
 
-if platform.system() == "Windows":
-  import HostInfo_win
-  HostInfo = HostInfo_win.HostInfo
-else:
-  import HostInfo_linux
-  HostInfo = HostInfo_linux.HostInfo
+logger = logging.getLogger()
+
+# service cmd
+SERVICE_CMD = "service"
+
+
+class HostInfo(object):
+  # Filters used to identify processes
+  PROC_FILTER = [
+    "hadoop", "zookeeper"
+  ]
+
+  current_umask = -1
+
+  def __init__(self, config=None):
+    self.config = config
+    self.reportFileHandler = HostCheckReportFileHandler(config)
+
+  def dirType(self, path):
+    if not os.path.exists(path):
+      return 'not_exist'
+    elif os.path.islink(path):
+      return 'sym_link'
+    elif os.path.isdir(path):
+      return 'directory'
+    elif os.path.isfile(path):
+      return 'file'
+    return 'unknown'
+
+  def checkLiveServices(self, services, result):
+    osType = OSCheck.get_os_family()
+    for service in services:
+      svcCheckResult = {}
+      if isinstance(service, dict):
+        serviceName = service[osType]
+      else:
+        serviceName = service
+      svcCheckResult['name'] = serviceName
+      svcCheckResult['status'] = "UNKNOWN"
+      svcCheckResult['desc'] = ""
+      try:
+        out, err, code = self.getServiceStatus(serviceName)
+        if 0 != code:
+          svcCheckResult['status'] = "Unhealthy"
+          svcCheckResult['desc'] = out
+          if len(out) == 0:
+            svcCheckResult['desc'] = err
+        else:
+          svcCheckResult['status'] = "Healthy"
+      except Exception, e:
+        svcCheckResult['status'] = "Unhealthy"
+        svcCheckResult['desc'] = repr(e)
+      result.append(svcCheckResult)
+
+  def getUMask(self):
+    if (self.current_umask == -1):
+      self.current_umask = os.umask(self.current_umask)
+      os.umask(self.current_umask)
+      return self.current_umask
+    else:
+      return self.current_umask
+
+  def checkReverseLookup(self):
+    """
+    Check if host fqdn resolves to current host ip
+    """
+    try:
+      host_name = socket.gethostname().lower()
+      host_ip = socket.gethostbyname(host_name)
+      host_fqdn = socket.getfqdn().lower()
+      fqdn_ip = socket.gethostbyname(host_fqdn)
+      return host_ip == fqdn_ip
+    except socket.error:
+      pass
+    return False
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HostInfoLinux(HostInfo):
+  # List of project names to be used to find alternatives folders etc.
+  DEFAULT_PROJECT_NAMES = [
+    "hadoop*", "hadoop", "hbase", "hcatalog", "hive", "ganglia",
+    "oozie", "sqoop", "hue", "zookeeper", "mapred", "hdfs", "flume",
+    "storm", "hive-hcatalog", "tez", "falcon", "ambari_qa", "hadoop_deploy",
+    "rrdcached", "hcat", "ambari-qa", "sqoop-ambari-qa", "sqoop-ambari_qa",
+    "webhcat", "hadoop-hdfs", "hadoop-yarn", "hadoop-mapreduce"
+  ]
+
+  # List of live services checked for on the host, takes a map of plain strings
+  DEFAULT_LIVE_SERVICES = [
+    {OSConst.REDHAT_FAMILY: "ntpd", OSConst.SUSE_FAMILY: "ntp", OSConst.UBUNTU_FAMILY: "ntp"}
+  ]
+
+  # Set of default users (need to be replaced with the configured user names)
+  DEFAULT_USERS = [
+    "hive", "ambari-qa", "oozie", "hbase", "hcat", "mapred",
+    "hdfs", "rrdcached", "zookeeper", "flume", "sqoop", "sqoop2",
+    "hue", "yarn", "tez", "storm", "falcon", "kafka", "knox"
+  ]
+
+  # Default set of directories that are checked for existence of files and folders
+  DEFAULT_DIRS = [
+    "/etc", "/var/run", "/var/log", "/usr/lib", "/var/lib", "/var/tmp", "/tmp", "/var", "/hadoop"
+  ]
+
+  # Packages that are used to find repos (then repos are used to find other packages)
+  PACKAGES = [
+    "hadoop_2_2_*", "hadoop-2-2-.*", "zookeeper_2_2_*", "zookeeper-2-2-.*",
+    "hadoop", "zookeeper", "webhcat", "*-manager-server-db", "*-manager-daemons"
+  ]
+
+  # Additional packages to look for (search packages that start with these)
+  ADDITIONAL_PACKAGES = [
+    "rrdtool", "rrdtool-python", "ganglia", "gmond", "gweb", "libconfuse",
+    "ambari-log4j", "hadoop", "zookeeper", "oozie", "webhcat"
+  ]
+
+  # ignore packages from repos whose names start with these strings
+  IGNORE_PACKAGES_FROM_REPOS = [
+    "ambari", "installed"
+  ]
+
+  # ignore required packages
+  IGNORE_PACKAGES = [
+    "epel-release"
+  ]
+
+  # ignore repos from the list of repos to be cleaned
+  IGNORE_REPOS = [
+    "ambari", "HDP-UTILS"
+  ]
+
+  RESULT_UNAVAILABLE = "unable_to_determine"
+
+  DEFAULT_SERVICE_NAME = "ntpd"
+  SERVICE_STATUS_CMD = "%s %s status" % (SERVICE_CMD, DEFAULT_SERVICE_NAME)
+
+  THP_FILE = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
+
+  def __init__(self, config=None):
+    super(HostInfoLinux, self).__init__(config)
+    self.packages = PackagesAnalyzer()
+
+  def osdiskAvailableSpace(self, path):
+    diskInfo = {}
+    try:
+      df = subprocess.Popen(["df", "-kPT", path], stdout=subprocess.PIPE)
+      dfdata = df.communicate()[0]
+      return Hardware.extractMountInfo(dfdata.splitlines()[-1])
+    except:
+      pass
+    return diskInfo
+
+  def createAlerts(self, alerts):
+    existingUsers = []
+    self.checkUsers(self.DEFAULT_USERS, existingUsers)
+    dirs = []
+    self.checkFolders(self.DEFAULT_DIRS, self.DEFAULT_PROJECT_NAMES, existingUsers, dirs)
+    alert = {
+      'name': 'host_alert',
+      'instance': None,
+      'service': 'AMBARI',
+      'component': 'host',
+      'host': hostname.hostname(self.config),
+      'state': 'OK',
+      'label': 'Disk space',
+      'text': 'Used disk space less than 80%'}
+    message = ""
+    mountinfoSet = []
+    for dir in dirs:
+      if dir["type"] == 'directory':
+        mountinfo = self.osdiskAvailableSpace(dir['name'])
+        if int(mountinfo["percent"].strip('%')) >= 80:
+          if not mountinfo in mountinfoSet:
+            mountinfoSet.append(mountinfo)
+          message += str(dir['name']) + ";\n"
+
+    if message != "":
+      message = "These discs have low space:\n" + str(
+        mountinfoSet) + "\n They include following critical directories:\n" + message
+      alert['state'] = 'WARNING'
+      alert['text'] = message
+    alerts.append(alert)
+    return alerts
+
+  def checkUsers(self, users, results):
+    f = open('/etc/passwd', 'r')
+    for userLine in f:
+      fields = userLine.split(":")
+      if fields[0] in users:
+        result = {}
+        homeDir = fields[5]
+        result['name'] = fields[0]
+        result['homeDir'] = fields[5]
+        result['status'] = "Available"
+        if not os.path.exists(homeDir):
+          result['status'] = "Invalid home directory"
+        results.append(result)
+
+  def checkFolders(self, basePaths, projectNames, existingUsers, dirs):
+    foldersToIgnore = []
+    for user in existingUsers:
+      foldersToIgnore.append(user['homeDir'])
+    try:
+      for dirName in basePaths:
+        for project in projectNames:
+          path = os.path.join(dirName.strip(), project.strip())
+          if not path in foldersToIgnore and os.path.exists(path):
+            obj = {}
+            obj['type'] = self.dirType(path)
+            obj['name'] = path
+            dirs.append(obj)
+    except:
+      pass
+
+  def javaProcs(self, list):
+    import pwd
+
+    try:
+      pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
+      for pid in pids:
+        with open(os.path.join('/proc', pid, 'cmdline'), 'rb') as f:
+          cmd = f.read().replace('\0', ' ')
+        if not 'AmbariServer' in cmd and 'java' in cmd:
+          dict = {}
+          dict['pid'] = int(pid)
+          dict['hadoop'] = False
+          for filter in self.PROC_FILTER:
+            if filter in cmd:
+              dict['hadoop'] = True
+          dict['command'] = cmd.strip()
+          # Resolve the process owner from the Uid line of /proc/<pid>/status
+          with open(os.path.join('/proc', pid, 'status')) as f:
+            for line in f:
+              if line.startswith('Uid:'):
+                uid = int(line.split()[1])
+                dict['user'] = pwd.getpwuid(uid).pw_name
+          list.append(dict)
+    except:
+      pass
+
+  def getReposToRemove(self, repos, ignoreList):
+    reposToRemove = []
+    for repo in repos:
+      addToRemoveList = True
+      for ignoreRepo in ignoreList:
+        if self.packages.nameMatch(ignoreRepo, repo):
+          addToRemoveList = False
+          break
+      if addToRemoveList:
+        reposToRemove.append(repo)
+    return reposToRemove
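+
+  # Illustrative call, assuming nameMatch matches repo names by prefix:
+  #   getReposToRemove(["HDP-2.2", "ambari-2.0", "HDP-UTILS-1.1.0.20"], self.IGNORE_REPOS)
+  #   -> ["HDP-2.2"]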
+
+  def getTransparentHugePage(self):
+    # This file exists only on RedHat 6
+    thp_regex = r"\[(.+)\]"
+    if os.path.isfile(self.THP_FILE):
+      with open(self.THP_FILE) as f:
+        file_content = f.read()
+        return re.search(thp_regex, file_content).groups()[0]
+    else:
+      return ""
+
+  def checkIptables(self):
+    return Firewall().getFirewallObject().check_iptables()
+
+  def hadoopVarRunCount(self):
+    if not os.path.exists('/var/run/hadoop'):
+      return 0
+    pids = glob.glob('/var/run/hadoop/*/*.pid')
+    return len(pids)
+
+  def hadoopVarLogCount(self):
+    if not os.path.exists('/var/log/hadoop'):
+      return 0
+    logs = glob.glob('/var/log/hadoop/*/*.log')
+    return len(logs)
+
+  def etcAlternativesConf(self, projects, etcResults):
+    if not os.path.exists('/etc/alternatives'):
+      return []
+    projectRegex = "'" + '|'.join(projects) + "'"
+    files = [f for f in os.listdir('/etc/alternatives') if re.match(projectRegex, f)]
+    for conf in files:
+      result = {}
+      filePath = os.path.join('/etc/alternatives', conf)
+      if os.path.islink(filePath):
+        realConf = os.path.realpath(filePath)
+        result['name'] = conf
+        result['target'] = realConf
+        etcResults.append(result)
+
+  def register(self, dict, componentsMapped=True, commandsInProgress=True):
+    """ Return various details about the host
+    componentsMapped: indicates if any components are mapped to this host
+    commandsInProgress: indicates if any commands are in progress
+    """
+
+    dict['hostHealth'] = {}
+
+    java = []
+    self.javaProcs(java)
+    dict['hostHealth']['activeJavaProcs'] = java
+
+    liveSvcs = []
+    self.checkLiveServices(self.DEFAULT_LIVE_SERVICES, liveSvcs)
+    dict['hostHealth']['liveServices'] = liveSvcs
+
+    dict['umask'] = str(self.getUMask())
+
+    dict['transparentHugePage'] = self.getTransparentHugePage()
+    dict['iptablesIsRunning'] = self.checkIptables()
+    dict['reverseLookup'] = self.checkReverseLookup()
+    # If commands are in progress or components are already mapped to this host
+    # Then do not perform certain expensive host checks
+    if componentsMapped or commandsInProgress:
+      dict['existingRepos'] = [self.RESULT_UNAVAILABLE]
+      dict['installedPackages'] = []
+      dict['alternatives'] = []
+      dict['stackFoldersAndFiles'] = []
+      dict['existingUsers'] = []
+
+    else:
+      etcs = []
+      self.etcAlternativesConf(self.DEFAULT_PROJECT_NAMES, etcs)
+      dict['alternatives'] = etcs
+
+      existingUsers = []
+      self.checkUsers(self.DEFAULT_USERS, existingUsers)
+      dict['existingUsers'] = existingUsers
+
+      dirs = []
+      self.checkFolders(self.DEFAULT_DIRS, self.DEFAULT_PROJECT_NAMES, existingUsers, dirs)
+      dict['stackFoldersAndFiles'] = dirs
+
+      installedPackages = []
+      availablePackages = []
+      self.packages.allInstalledPackages(installedPackages)
+      self.packages.allAvailablePackages(availablePackages)
+
+      repos = []
+      self.packages.getInstalledRepos(self.PACKAGES, installedPackages + availablePackages,
+                                      self.IGNORE_PACKAGES_FROM_REPOS, repos)
+      packagesInstalled = self.packages.getInstalledPkgsByRepo(repos, self.IGNORE_PACKAGES, installedPackages)
+      additionalPkgsInstalled = self.packages.getInstalledPkgsByNames(
+        self.ADDITIONAL_PACKAGES, installedPackages)
+      allPackages = list(set(packagesInstalled + additionalPkgsInstalled))
+      dict['installedPackages'] = self.packages.getPackageDetails(installedPackages, allPackages)
+
+      repos = self.getReposToRemove(repos, self.IGNORE_REPOS)
+      dict['existingRepos'] = repos
+
+      self.reportFileHandler.writeHostCheckFile(dict)
+      pass
+
+    # The time stamp must be recorded at the end
+    dict['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
+
+    pass
+
+  def getServiceStatus(self, service_name):
+    service_check_live = shlex.split(self.SERVICE_STATUS_CMD)
+    service_check_live[1] = service_name
+    osStat = subprocess.Popen(service_check_live, stdout=subprocess.PIPE,
+                              stderr=subprocess.PIPE)
+    out, err = osStat.communicate()
+    return out, err, osStat.returncode
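+
+  # e.g. getServiceStatus("ntpd") runs "service ntpd status" and returns the
+  # (stdout, stderr, return code) triple.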
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HostInfoWindows(HostInfo):
+  SERVICE_STATUS_CMD = 'If ((Get-Service | Where-Object {{$_.Name -eq \'{0}\'}}).Status -eq \'Running\') {{echo "Running"; $host.SetShouldExit(0)}} Else {{echo "Stopped"; $host.SetShouldExit(1)}}'
+  GET_USERS_CMD = '$accounts=(Get-WmiObject -Class Win32_UserAccount -Namespace "root\cimv2" -Filter "LocalAccount=\'$True\'" -ComputerName "LocalHost" -ErrorAction Stop); foreach ($acc in $accounts) {echo $acc.Name}'
+  GET_JAVA_PROC_CMD = 'foreach ($process in (gwmi Win32_Process -Filter "name = \'java.exe\'")){echo $process.ProcessId;echo $process.CommandLine; echo $process.GetOwner().User}'
+  DEFAULT_LIVE_SERVICES = [
+    {OSConst.WINSRV_FAMILY: "W32Time"}
+  ]
+
+  def checkUsers(self, users, results):
+    get_users_cmd = ["powershell", '-noProfile', '-NonInteractive', '-nologo', "-Command", self.GET_USERS_CMD]
+    try:
+      osStat = subprocess.Popen(get_users_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+      out, err = osStat.communicate()
+    except:
+      raise Exception("Failed to get users.")
+    for user in out.split(os.linesep):
+      if user in users:
+        result = {}
+        result['name'] = user
+        result['status'] = "Available"
+        results.append(result)
+
+  def checkIptables(self):
+    from ambari_commons.os_windows import run_powershell_script, CHECK_FIREWALL_SCRIPT
+
+    out = run_powershell_script(CHECK_FIREWALL_SCRIPT)
+    if out[0] != 0:
+      logger.warn("Unable to check firewall status:{0}".format(out[2]))
+      return False
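+    # Assumption: CHECK_FIREWALL_SCRIPT emits one 0/1 enabled flag per
+    # firewall profile, so any "1" means at least one profile is active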
+    profiles_status = [i for i in out[1].split("\n") if not i == ""]
+    if "1" in profiles_status:
+      return True
+    return False
+
+  def createAlerts(self, alerts):
+    # TODO AMBARI-7849 Implement createAlerts for Windows
+    return alerts
+
+  def javaProcs(self, list):
+    try:
+      from ambari_commons.os_windows import run_powershell_script
+
+      code, out, err = run_powershell_script(self.GET_JAVA_PROC_CMD)
+      if code == 0:
+        # GET_JAVA_PROC_CMD echoes pid, command line and owner as three
+        # consecutive lines per process
+        splitted_output = out.split(os.linesep)
+        for i in range(0, len(splitted_output), 3):
+          pid = splitted_output[i]
+          cmd = splitted_output[i + 1]
+          user = splitted_output[i + 2]
+          if not 'AmbariServer' in cmd and 'java' in cmd:
+            dict = {}
+            dict['pid'] = int(pid)
+            dict['hadoop'] = False
+            for filter in self.PROC_FILTER:
+              if filter in cmd:
+                dict['hadoop'] = True
+            dict['command'] = cmd.strip()
+            dict['user'] = user
+            list.append(dict)
+    except Exception:
+      pass
+
+  def getServiceStatus(self, service_name):
+    from ambari_commons.os_windows import run_powershell_script
+    code, out, err = run_powershell_script(self.SERVICE_STATUS_CMD.format(service_name))
+    return out, err, code
+
+  def register(self, dict, componentsMapped=True, commandsInProgress=True):
+    """ Return various details about the host
+    componentsMapped: indicates if any components are mapped to this host
+    commandsInProgress: indicates if any commands are in progress
+    """
+    dict['hostHealth'] = {}
+
+    java = []
+    self.javaProcs(java)
+    dict['hostHealth']['activeJavaProcs'] = java
+
+    liveSvcs = []
+    self.checkLiveServices(self.DEFAULT_LIVE_SERVICES, liveSvcs)
+    dict['hostHealth']['liveServices'] = liveSvcs
+
+    dict['umask'] = str(self.getUMask())
+
+    dict['iptablesIsRunning'] = self.checkIptables()
+    dict['reverseLookup'] = self.checkReverseLookup()
+    # If commands are in progress or components are already mapped to this host
+    # Then do not perform certain expensive host checks
+    if componentsMapped or commandsInProgress:
+      dict['existingRepos'] = [self.RESULT_UNAVAILABLE]
+      dict['installedPackages'] = []
+      dict['alternatives'] = []
+      dict['stackFoldersAndFiles'] = []
+      dict['existingUsers'] = []
+    else:
+      existingUsers = []
+      self.checkUsers(self.DEFAULT_USERS, existingUsers)
+      dict['existingUsers'] = existingUsers
+      # TODO check HDP stack and folders here
+      self.reportFileHandler.writeHostCheckFile(dict)
+      pass
+
+    # The time stamp must be recorded at the end
+    dict['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
+
+
+def main(argv=None):
+  h = HostInfo()
+  struct = {}
+  h.register(struct)
+  print struct
+
+
+if __name__ == '__main__':
+  main()
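
The OS-specific classes above are wired together through the new
ambari_commons.os_family_impl module: decorating HostInfoWindows with
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY) registers it as the Windows
implementation of HostInfo. A minimal sketch of how such a registry-based
dispatcher can work is shown below; the decorator signature matches its usage
in this diff, but the body and the get_impl helper are illustrative
assumptions, not the actual ambari_commons implementation:

  from ambari_commons.os_check import OSCheck

  class OsFamilyImpl(object):
    """Registers a class as the implementation of its base for one OS family."""
    _registry = {}

    def __init__(self, os_family):
      self.os_family = os_family

    def __call__(self, cls):
      # Key on the immediate base class, e.g. HostInfo for HostInfoWindows
      base = cls.__bases__[0]
      OsFamilyImpl._registry[(base.__name__, self.os_family)] = cls
      return cls

    @staticmethod
    def get_impl(base):
      # Fall back to the base class when no family-specific subclass exists
      key = (base.__name__, OSCheck.get_os_family())
      return OsFamilyImpl._registry.get(key, base)

Under this sketch a caller would obtain the right class with
OsFamilyImpl.get_impl(HostInfo)(); the real module evidently goes a step
further, since main() above instantiates HostInfo directly and still gets the
platform-specific behavior.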

http://git-wip-us.apache.org/repos/asf/ambari/blob/15c65b93/ambari-agent/src/main/python/ambari_agent/HostInfo_linux.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/HostInfo_linux.py b/ambari-agent/src/main/python/ambari_agent/HostInfo_linux.py
deleted file mode 100644
index 2074a97..0000000
--- a/ambari-agent/src/main/python/ambari_agent/HostInfo_linux.py
+++ /dev/null
@@ -1,380 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import os
-import glob
-import logging
-import pwd
-import re
-import time
-import subprocess
-import threading
-import shlex
-import platform
-import hostname
-from PackagesAnalyzer import PackagesAnalyzer
-from HostCheckReportFileHandler import HostCheckReportFileHandler
-from Hardware import Hardware
-from ambari_commons import OSCheck, OSConst, Firewall
-import socket
-
-logger = logging.getLogger()
-
-# service cmd
-SERVICE_CMD = "service"
-
-
-class HostInfo:
-  # List of project names to be used to find alternatives folders etc.
-  DEFAULT_PROJECT_NAMES = [
-    "hadoop*", "hadoop", "hbase", "hcatalog", "hive", "ganglia",
-    "oozie", "sqoop", "hue", "zookeeper", "mapred", "hdfs", "flume",
-    "storm", "hive-hcatalog", "tez", "falcon", "ambari_qa", "hadoop_deploy",
-    "rrdcached", "hcat", "ambari-qa", "sqoop-ambari-qa", "sqoop-ambari_qa",
-    "webhcat", "hadoop-hdfs", "hadoop-yarn", "hadoop-mapreduce"
-  ]
-
-  # List of live services checked for on the host, takes a map of plan strings
-  DEFAULT_LIVE_SERVICES = [
-    {OSConst.REDHAT_FAMILY: "ntpd", OSConst.SUSE_FAMILY: "ntp", OSConst.UBUNTU_FAMILY: "ntp"}
-  ]
-
-  # Set of default users (need to be replaced with the configured user names)
-  DEFAULT_USERS = [
-    "hive", "ambari-qa", "oozie", "hbase", "hcat", "mapred",
-    "hdfs", "rrdcached", "zookeeper", "flume", "sqoop", "sqoop2",
-    "hue", "yarn", "tez", "storm", "falcon", "kafka","knox"
-  ]
-
-  # Filters used to identify processed
-  PROC_FILTER = [
-    "hadoop", "zookeeper"
-  ]
-
-  # Additional path patterns to find existing directory
-  DIRNAME_PATTERNS = [
-    "/tmp/hadoop-", "/tmp/hsperfdata_"
-  ]
-
-  # Default set of directories that are checked for existence of files and folders
-  DEFAULT_DIRS = [
-    "/etc", "/var/run", "/var/log", "/usr/lib", "/var/lib", "/var/tmp", "/tmp", "/var", "/hadoop"
-  ]
-
-  # Packages that are used to find repos (then repos are used to find other packages)
-  PACKAGES = [
-    "hadoop_2_2_*","hadoop-2-2-.*","zookeeper_2_2_*","zookeeper-2-2-.*",
-    "hadoop", "zookeeper", "webhcat", "*-manager-server-db", "*-manager-daemons"
-  ]
-
-  # Additional packages to look for (search packages that start with these)
-  ADDITIONAL_PACKAGES = [
-    "rrdtool", "rrdtool-python", "ganglia", "gmond", "gweb", "libconfuse",
-    "ambari-log4j", "hadoop", "zookeeper", "oozie", "webhcat"
-  ]
-
-  # ignore packages from repos whose names start with these strings
-  IGNORE_PACKAGES_FROM_REPOS = [
-    "ambari", "installed"
-  ]
-
-  # ignore required packages
-  IGNORE_PACKAGES = [
-    "epel-release"
-  ]
-
-  # ignore repos from the list of repos to be cleaned
-  IGNORE_REPOS = [
-    "ambari", "HDP-UTILS"
-  ]
-
-  # default timeout for async invoked processes
-  TIMEOUT_SECONDS = 60
-  RESULT_UNAVAILABLE = "unable_to_determine"
-
-  DEFAULT_SERVICE_NAME = "ntpd"
-  SERVICE_STATUS_CMD = "%s %s status" % (SERVICE_CMD, DEFAULT_SERVICE_NAME)
-
-  THP_FILE = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
-
-  event = threading.Event()
-
-  current_umask = -1
-
-  def __init__(self, config=None):
-    self.packages = PackagesAnalyzer()
-    self.config = config
-    self.reportFileHandler = HostCheckReportFileHandler(config)
-
-  def dirType(self, path):
-    if not os.path.exists(path):
-      return 'not_exist'
-    elif os.path.islink(path):
-      return 'sym_link'
-    elif os.path.isdir(path):
-      return 'directory'
-    elif os.path.isfile(path):
-      return 'file'
-    return 'unknown'
-
-  def hadoopVarRunCount(self):
-    if not os.path.exists('/var/run/hadoop'):
-      return 0
-    pids = glob.glob('/var/run/hadoop/*/*.pid')
-    return len(pids)
-
-  def hadoopVarLogCount(self):
-    if not os.path.exists('/var/log/hadoop'):
-      return 0
-    logs = glob.glob('/var/log/hadoop/*/*.log')
-    return len(logs)
-
-  def etcAlternativesConf(self, projects, etcResults):
-    if not os.path.exists('/etc/alternatives'):
-      return []
-    projectRegex = "'" + '|'.join(projects) + "'"
-    files = [f for f in os.listdir('/etc/alternatives') if re.match(projectRegex, f)]
-    for conf in files:
-      result = {}
-      filePath = os.path.join('/etc/alternatives', conf)
-      if os.path.islink(filePath):
-        realConf = os.path.realpath(filePath)
-        result['name'] = conf
-        result['target'] = realConf
-        etcResults.append(result)
-
-  def checkLiveServices(self, services, result):
-    osType = OSCheck.get_os_family()
-    for service in services:
-      svcCheckResult = {}
-      if isinstance(service, dict):
-        serviceName = service[osType]
-      else:
-        serviceName = service
-
-      service_check_live = shlex.split(self.SERVICE_STATUS_CMD)
-      service_check_live[1] = serviceName
-
-      svcCheckResult['name'] = serviceName
-      svcCheckResult['status'] = "UNKNOWN"
-      svcCheckResult['desc'] = ""
-      try:
-        osStat = subprocess.Popen(service_check_live, stdout=subprocess.PIPE,
-          stderr=subprocess.PIPE)
-        out, err = osStat.communicate()
-        if 0 != osStat.returncode:
-          svcCheckResult['status'] = "Unhealthy"
-          svcCheckResult['desc'] = out
-          if len(out) == 0:
-            svcCheckResult['desc'] = err
-        else:
-          svcCheckResult['status'] = "Healthy"
-      except Exception, e:
-        svcCheckResult['status'] = "Unhealthy"
-        svcCheckResult['desc'] = repr(e)
-      result.append(svcCheckResult)
-
-  def checkUsers(self, users, results):
-    f = open('/etc/passwd', 'r')
-    for userLine in f:
-      fields = userLine.split(":")
-      if fields[0] in users:
-        result = {}
-        homeDir = fields[5]
-        result['name'] = fields[0]
-        result['homeDir'] = fields[5]
-        result['status'] = "Available"
-        if not os.path.exists(homeDir):
-          result['status'] = "Invalid home directory"
-        results.append(result)
-
-  def osdiskAvailableSpace(self, path):
-    diskInfo = {}
-    try:
-      df = subprocess.Popen(["df", "-kPT", path], stdout=subprocess.PIPE)
-      dfdata = df.communicate()[0]
-      return Hardware.extractMountInfo(dfdata.splitlines()[-1])
-    except:
-      pass
-    return diskInfo
-
-  def checkFolders(self, basePaths, projectNames, existingUsers, dirs):
-    foldersToIgnore = []
-    for user in existingUsers:
-      foldersToIgnore.append(user['homeDir'])
-    try:
-      for dirName in basePaths:
-        for project in projectNames:
-          path = os.path.join(dirName.strip(), project.strip())
-          if not path in foldersToIgnore and os.path.exists(path):
-            obj = {}
-            obj['type'] = self.dirType(path)
-            obj['name'] = path
-            dirs.append(obj)
-    except:
-      pass
-
-  def javaProcs(self, list):
-    try:
-      pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
-      for pid in pids:
-        cmd = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read()
-        cmd = cmd.replace('\0', ' ')
-        if not 'AmbariServer' in cmd:
-          if 'java' in cmd:
-            dict = {}
-            dict['pid'] = int(pid)
-            dict['hadoop'] = False
-            for filter in self.PROC_FILTER:
-              if filter in cmd:
-                dict['hadoop'] = True
-            dict['command'] = cmd.strip()
-            for line in open(os.path.join('/proc', pid, 'status')):
-              if line.startswith('Uid:'):
-                uid = int(line.split()[1])
-                dict['user'] = pwd.getpwuid(uid).pw_name
-            list.append(dict)
-    except:
-      pass
-    pass
-
-  def getReposToRemove(self, repos, ignoreList):
-    reposToRemove = []
-    for repo in repos:
-      addToRemoveList = True
-      for ignoreRepo in ignoreList:
-        if self.packages.nameMatch(ignoreRepo, repo):
-          addToRemoveList = False
-          continue
-      if addToRemoveList:
-        reposToRemove.append(repo)
-    return reposToRemove
-
-  def getUMask(self):
-    if (self.current_umask == -1):
-      self.current_umask = os.umask(self.current_umask)
-      os.umask(self.current_umask)
-      return self.current_umask
-    else:
-      return self.current_umask
-
-  def getTransparentHugePage(self):
-    # This file exist only on redhat 6
-    thp_regex = "\[(.+)\]"
-    if os.path.isfile(self.THP_FILE):
-      with open(self.THP_FILE) as f:
-        file_content = f.read()
-        return re.search(thp_regex, file_content).groups()[0]
-    else:
-      return ""
-
-  def checkIptables(self):
-    return Firewall().getFirewallObject().check_iptables()
-
-  """ Return various details about the host
-  componentsMapped: indicates if any components are mapped to this host
-  commandsInProgress: indicates if any commands are in progress
-  """
-  def register(self, dict, componentsMapped=True, commandsInProgress=True):
-    dict['hostHealth'] = {}
-
-    java = []
-    self.javaProcs(java)
-    dict['hostHealth']['activeJavaProcs'] = java
-
-    liveSvcs = []
-    self.checkLiveServices(self.DEFAULT_LIVE_SERVICES, liveSvcs)
-    dict['hostHealth']['liveServices'] = liveSvcs
-
-    dict['umask'] = str(self.getUMask())
-
-    dict['transparentHugePage'] = self.getTransparentHugePage()
-    dict['iptablesIsRunning'] = self.checkIptables()
-    dict['reverseLookup'] = self.checkReverseLookup()
-    # If commands are in progress or components are already mapped to this host
-    # Then do not perform certain expensive host checks
-    if componentsMapped or commandsInProgress:
-      dict['existingRepos'] = [self.RESULT_UNAVAILABLE]
-      dict['installedPackages'] = []
-      dict['alternatives'] = []
-      dict['stackFoldersAndFiles'] = []
-      dict['existingUsers'] = []
-
-    else:
-      etcs = []
-      self.etcAlternativesConf(self.DEFAULT_PROJECT_NAMES, etcs)
-      dict['alternatives'] = etcs
-
-      existingUsers = []
-      self.checkUsers(self.DEFAULT_USERS, existingUsers)
-      dict['existingUsers'] = existingUsers
-
-      dirs = []
-      self.checkFolders(self.DEFAULT_DIRS, self.DEFAULT_PROJECT_NAMES, existingUsers, dirs)
-      dict['stackFoldersAndFiles'] = dirs
-
-      installedPackages = []
-      availablePackages = []
-      self.packages.allInstalledPackages(installedPackages)
-      self.packages.allAvailablePackages(availablePackages)
-
-      repos = []
-      self.packages.getInstalledRepos(self.PACKAGES, installedPackages + availablePackages,
-        self.IGNORE_PACKAGES_FROM_REPOS, repos)
-      packagesInstalled = self.packages.getInstalledPkgsByRepo(repos, self.IGNORE_PACKAGES, installedPackages)
-      additionalPkgsInstalled = self.packages.getInstalledPkgsByNames(
-        self.ADDITIONAL_PACKAGES, installedPackages)
-      allPackages = list(set(packagesInstalled + additionalPkgsInstalled))
-      dict['installedPackages'] = self.packages.getPackageDetails(installedPackages, allPackages)
-
-      repos = self.getReposToRemove(repos, self.IGNORE_REPOS)
-      dict['existingRepos'] = repos
-
-      self.reportFileHandler.writeHostCheckFile(dict)
-      pass
-
-    # The time stamp must be recorded at the end
-    dict['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
-
-    pass
-
-  def checkReverseLookup(self):
-    """
-    Check if host fqdn resolves to current host ip
-    """
-    try:
-      host_name = socket.gethostname()
-      host_ip = socket.gethostbyname(host_name)
-      host_fqdn = socket.getfqdn()
-      fqdn_ip = socket.gethostbyname(host_fqdn)
-      return host_ip == fqdn_ip
-    except socket.error:
-      pass
-    return False
-
-def main(argv=None):
-  h = HostInfo()
-  struct = {}
-  h.register(struct)
-  print struct
-
-
-if __name__ == '__main__':
-  main()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/15c65b93/ambari-agent/src/main/python/ambari_agent/HostInfo_win.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/HostInfo_win.py b/ambari-agent/src/main/python/ambari_agent/HostInfo_win.py
deleted file mode 100644
index 05e0caf..0000000
--- a/ambari-agent/src/main/python/ambari_agent/HostInfo_win.py
+++ /dev/null
@@ -1,227 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import os
-import logging
-import time
-import subprocess
-from HostCheckReportFileHandler import HostCheckReportFileHandler
-from shell import shellRunner
-from ambari_commons.os_check import OSCheck, OSConst
-from ambari_commons.os_windows import run_powershell_script, CHECK_FIREWALL_SCRIPT
-import socket
-
-logger = logging.getLogger()
-
-# OS info
-OS_VERSION = OSCheck().get_os_major_version()
-OS_TYPE = OSCheck.get_os_type()
-OS_FAMILY = OSCheck.get_os_family()
-
-class HostInfo:
-  # List of live services checked for on the host, takes a map of plan strings
-  DEFAULT_LIVE_SERVICES = [
-    {OSConst.WINSRV_FAMILY: "W32Time"}
-  ]
-
-  # Set of default users (need to be replaced with the configured user names)
-  DEFAULT_USERS = [
-    "hive", "ambari-qa", "oozie", "hbase", "hcat", "mapred",
-    "hdfs", "rrdcached", "zookeeper", "flume", "sqoop", "sqoop2",
-    "hue", "yarn"
-  ]
-
-  # Filters used to identify processed
-  PROC_FILTER = [
-    "hadoop", "zookeeper"
-  ]
-
-  RESULT_UNAVAILABLE = "unable_to_determine"
-
-  SERVICE_STATUS_CMD = 'If ((Get-Service | Where-Object {{$_.Name -eq \'{0}\'}}).Status -eq \'Running\') {{echo "Running"; $host.SetShouldExit(0)}} Else {{echo "Stopped"; $host.SetShouldExit(1)}}'
-  GET_USERS_CMD = '$accounts=(Get-WmiObject -Class Win32_UserAccount -Namespace "root\cimv2" -Filter "LocalAccount=\'$True\'" -ComputerName "LocalHost" -ErrorAction Stop); foreach ($acc in $accounts) {echo $acc.Name}'
-  GET_JAVA_PROC_CMD = 'foreach ($process in (gwmi Win32_Process -Filter "name = \'java.exe\'")){echo $process.ProcessId;echo $process.CommandLine; echo $process.GetOwner().User}'
-
-  current_umask = -1
-
-  def __init__(self, config=None):
-    self.reportFileHandler = HostCheckReportFileHandler(config)
-
-  def dirType(self, path):
-    if not os.path.exists(path):
-      return 'not_exist'
-    elif os.path.islink(path):
-      return 'sym_link'
-    elif os.path.isdir(path):
-      return 'directory'
-    elif os.path.isfile(path):
-      return 'file'
-    return 'unknown'
-
-  def checkLiveServices(self, services, result):
-    osType = OSCheck.get_os_family()
-    for service in services:
-      svcCheckResult = {}
-      if isinstance(service, dict):
-        serviceName = service[osType]
-      else:
-        serviceName = service
-
-      service_check_live = ["powershell",'-noProfile', '-NonInteractive',  '-nologo', "-Command", self.SERVICE_STATUS_CMD.format(serviceName)]
-      svcCheckResult['name'] = serviceName
-      svcCheckResult['status'] = "UNKNOWN"
-      svcCheckResult['desc'] = ""
-      try:
-        osStat = subprocess.Popen(service_check_live, stdout=subprocess.PIPE,
-                                  stderr=subprocess.PIPE)
-        out, err = osStat.communicate()
-        if 0 != osStat.returncode:
-          svcCheckResult['status'] = "Unhealthy"
-          svcCheckResult['desc'] = out
-          if len(out) == 0:
-            svcCheckResult['desc'] = err
-        else:
-          svcCheckResult['status'] = "Healthy"
-      except Exception, e:
-        svcCheckResult['status'] = "Unhealthy"
-        svcCheckResult['desc'] = repr(e)
-      result.append(svcCheckResult)
-
-  #TODO get user directory
-  def checkUsers(self, users, results):
-    get_users_cmd = ["powershell",'-noProfile', '-NonInteractive',  '-nologo', "-Command", self.GET_USERS_CMD]
-    try:
-      osStat = subprocess.Popen(get_users_cmd, stdout=subprocess.PIPE,                               stderr=subprocess.PIPE)
-      out, err = osStat.communicate()
-    except:
-      raise Exception("Failed to get users.")
-    for user in out.split(os.linesep):
-      if user in users:
-        result = {}
-        result['name'] = user
-        result['status'] = "Available"
-        results.append(result)
-
-  def javaProcs(self, list):
-    try:
-      runner = shellRunner()
-      command_result = runner.run(["powershell",'-noProfile', '-NonInteractive',  '-nologo', "-Command", self.GET_JAVA_PROC_CMD])
-      if command_result["exitCode"] == 0:
-        splitted_output = command_result["output"].split(os.linesep)
-        for i in [index for index in range(0,len(splitted_output)) if (index % 3)==0]:
-          pid = splitted_output[i]
-          cmd = splitted_output[i+1]
-          user = splitted_output[i+2]
-          if not 'AmbariServer' in cmd:
-            if 'java' in cmd:
-              dict = {}
-              dict['pid'] = int(pid)
-              dict['hadoop'] = False
-              for filter in self.PROC_FILTER:
-                if filter in cmd:
-                  dict['hadoop'] = True
-              dict['command'] = cmd.strip()
-              dict['user'] = user
-              list.append(dict)
-    except Exception as e:
-      pass
-    pass
-
-  def getUMask(self):
-    if (self.current_umask == -1):
-      self.current_umask = os.umask(self.current_umask)
-      os.umask(self.current_umask)
-      return self.current_umask
-    else:
-      return self.current_umask
-
-  def checkIptables(self):
-    out = run_powershell_script(CHECK_FIREWALL_SCRIPT)
-    if out[0] != 0:
-      logger.warn("Unable to check firewall status:{0}".format(out[2]))
-      return False
-    profiles_status = [i for i in out[1].split("\n") if not i == ""]
-    if "1" in profiles_status:
-      return True
-    return False
-
-  """ Return various details about the host
-  componentsMapped: indicates if any components are mapped to this host
-  commandsInProgress: indicates if any commands are in progress
-  """
-  def register(self, dict, componentsMapped=True, commandsInProgress=True):
-    dict['hostHealth'] = {}
-
-    java = []
-    self.javaProcs(java)
-    dict['hostHealth']['activeJavaProcs'] = java
-
-    liveSvcs = []
-    self.checkLiveServices(self.DEFAULT_LIVE_SERVICES, liveSvcs)
-    dict['hostHealth']['liveServices'] = liveSvcs
-
-    dict['umask'] = str(self.getUMask())
-
-    dict['iptablesIsRunning'] = self.checkIptables()
-    dict['reverseLookup'] = self.checkReverseLookup()
-    # If commands are in progress or components are already mapped to this host
-    # Then do not perform certain expensive host checks
-    if componentsMapped or commandsInProgress:
-      dict['existingRepos'] = [self.RESULT_UNAVAILABLE]
-      dict['installedPackages'] = []
-      dict['alternatives'] = []
-      dict['stackFoldersAndFiles'] = []
-      dict['existingUsers'] = []
-    else:
-      existingUsers = []
-      self.checkUsers(self.DEFAULT_USERS, existingUsers)
-      dict['existingUsers'] = existingUsers
-      #TODO check HDP stack and folders here
-      self.reportFileHandler.writeHostCheckFile(dict)
-      pass
-
-    # The time stamp must be recorded at the end
-    dict['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
-
-    pass
-
-  def checkReverseLookup(self):
-    """
-    Check if host fqdn resolves to current host ip
-    """
-    try:
-      host_name = socket.gethostname().lower()
-      host_ip = socket.gethostbyname(host_name)
-      host_fqdn = socket.getfqdn().lower()
-      fqdn_ip = socket.gethostbyname(host_fqdn)
-      return host_ip == fqdn_ip
-    except socket.error:
-      pass
-    return False
-
-def main(argv=None):
-  h = HostInfo()
-  struct = {}
-  h.register(struct)
-  print struct
-
-
-if __name__ == '__main__':
-  main()

http://git-wip-us.apache.org/repos/asf/ambari/blob/15c65b93/ambari-agent/src/main/python/ambari_agent/NetUtil.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/NetUtil.py b/ambari-agent/src/main/python/ambari_agent/NetUtil.py
index 79d5343..f657140 100644
--- a/ambari-agent/src/main/python/ambari_agent/NetUtil.py
+++ b/ambari-agent/src/main/python/ambari_agent/NetUtil.py
@@ -19,6 +19,7 @@ import logging
 import httplib
 from ssl import SSLError
 import platform
+from HeartbeatHandlers import HeartbeatStopHandlers
 
 ERROR_SSL_WRONG_VERSION = "SSLError: Failed to connect. Please check openssl library versions. \n" +\
               "Refer to: https://bugzilla.redhat.com/show_bug.cgi?id=1022468 for more details."
@@ -47,13 +48,7 @@ class NetUtil:
 
   def __init__(self, stop_callback=None):
     if stop_callback is None:
-      IS_WINDOWS = platform.system() == "Windows"
-      if IS_WINDOWS:
-        from HeartbeatHandlers_windows import HeartbeatStopHandler
-      else:
-        from HeartbeatStopHandler_linux import HeartbeatStopHandler
-      stop_callback = HeartbeatStopHandler()
-
+      stop_callback = HeartbeatStopHandlers()
     self.stopCallback = stop_callback
 
   def checkURL(self, url):