Posted to commits@ambari.apache.org by vb...@apache.org on 2017/10/06 07:41:42 UTC

[1/2] ambari git commit: AMBARI-22124. Refactor AMS logic in stack advisors to service advisors.(vbrodetskyi)

Repository: ambari
Updated Branches:
  refs/heads/trunk 9adfcdcbd -> 0f32765dc


http://git-wip-us.apache.org/repos/asf/ambari/blob/0f32765d/ambari-server/src/test/python/common-services/AMBARI_METRICS/test_service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/common-services/AMBARI_METRICS/test_service_advisor.py b/ambari-server/src/test/python/common-services/AMBARI_METRICS/test_service_advisor.py
new file mode 100644
index 0000000..05254fe
--- /dev/null
+++ b/ambari-server/src/test/python/common-services/AMBARI_METRICS/test_service_advisor.py
@@ -0,0 +1,596 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import imp
+import json
+import os
+from unittest import TestCase
+
+from mock.mock import patch, MagicMock
+
+
+class TestAMBARI_METRICS010ServiceAdvisor(TestCase):
+
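+  # Pre-load the shared stack_advisor module so it is registered in sys.modules;
+  # the service advisor loaded below resolves its imports against it.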
+  testDirectory = os.path.dirname(os.path.abspath(__file__))
+  stack_advisor_path = os.path.join(testDirectory, '../../../../main/resources/stacks/stack_advisor.py')
+  with open(stack_advisor_path, 'rb') as fp:
+    imp.load_module('stack_advisor', fp, stack_advisor_path, ('.py', 'rb', imp.PY_SOURCE))
+
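+  # Load the AMBARI_METRICS 0.1.0 service advisor under test from the common-services tree.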
+  serviceAdvisorPath = '../../../../main/resources/common-services/AMBARI_METRICS/0.1.0/service_advisor.py'
+  ambariMetrics010ServiceAdvisorPath = os.path.join(testDirectory, serviceAdvisorPath)
+  with open(ambariMetrics010ServiceAdvisorPath, 'rb') as fp:
+    service_advisor_impl = imp.load_module('service_advisor_impl', fp, ambariMetrics010ServiceAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
+
+  def setUp(self):
+    serviceAdvisorClass = getattr(self.service_advisor_impl, 'AMBARI_METRICSServiceAdvisor')
+    self.serviceAdvisor = serviceAdvisorClass()
+
+
+  def test_recommendAmsConfigurations(self):
+    configurations = {
+      "hadoop-env": {
+        "properties": {
+          "hdfs_user": "hdfs",
+          "proxyuser_group": "users"
+        }
+      }
+    }
+
+    hosts = {
+      "items": [
+        {
+          "href": "/api/v1/hosts/host1",
+          "Hosts": {
+            "cpu_count": 1,
+            "host_name": "c6401.ambari.apache.org",
+            "os_arch": "x86_64",
+            "os_type": "centos6",
+            "ph_cpu_count": 1,
+            "public_host_name": "public.c6401.ambari.apache.org",
+            "rack_info": "/default-rack",
+            "total_mem": 2097152,
+            "disk_info": [{
+              "size": '80000000',
+              "mountpoint": "/"
+            }]
+          }
+        },
+        {
+          "href": "/api/v1/hosts/host2",
+          "Hosts": {
+            "cpu_count": 1,
+            "host_name": "c6402.ambari.apache.org",
+            "os_arch": "x86_64",
+            "os_type": "centos6",
+            "ph_cpu_count": 1,
+            "public_host_name": "public.c6402.ambari.apache.org",
+            "rack_info": "/default-rack",
+            "total_mem": 1048576,
+            "disk_info": [{
+              "size": '800000000',
+              "mountpoint": "/"
+            }]
+          }
+        }
+      ]}
+
+
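+    # Two METRICS_COLLECTOR hosts: the advisor is expected to recommend distributed mode.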
+    services1 = {
+      "services": [
+        {
+          "StackServices": {
+            "service_name": "HDFS"
+          }, "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "NAMENODE",
+              "hostnames": ["c6401.ambari.apache.org"]
+            }
+          }
+        ]
+        },
+        {
+          "StackServices": {
+            "service_name": "AMBARI_METRICS"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "component_name": "METRICS_COLLECTOR",
+                "hostnames": ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]
+              }
+            }, {
+              "StackServiceComponents": {
+                "component_name": "METRICS_MONITOR",
+                "hostnames": ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]
+              }
+            }
+          ]
+        }],
+      "configurations": configurations,
+      "ambari-server-properties": {"ambari-server.user":"ambari_user"}
+    }
+
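+    # Cluster summary consumed by the recommendation logic (available RAM, in MB).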
+    clusterData = {
+      "totalAvailableRam": 2048
+    }
+
+    expected = {'ams-env': {'properties': {'metrics_collector_heapsize': '512'}},
+                  'ams-grafana-env': {'properties': {},
+                                                             'property_attributes': {'metrics_grafana_password': {'visible': 'false'}}},
+                  'ams-hbase-env': {'properties': {'hbase_log_dir': '/var/log/ambari-metrics-collector',
+                                                                                       'hbase_master_heapsize': '512',
+                                                                                       'hbase_master_xmn_size': '102',
+                                                                                       'hbase_regionserver_heapsize': '1024',
+                                                                                       'regionserver_xmn_size': '128'}},
+                  'ams-hbase-site': {'properties': {'hbase.cluster.distributed': 'true',
+                                                                                         'hbase.hregion.memstore.flush.size': '134217728',
+                                                                                         'hbase.regionserver.global.memstore.lowerLimit': '0.3',
+                                                                                         'hbase.regionserver.global.memstore.upperLimit': '0.35',
+                                                                                         'hbase.rootdir': '/user/ams/hbase',
+                                                                                         'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase-tmp',
+                                                                                         'hbase.zookeeper.property.clientPort': '2181',
+                                                                                         'hfile.block.cache.size': '0.3'}},
+                  'ams-site': {'properties': {'timeline.metrics.cache.commit.interval': '10',
+                                                                             'timeline.metrics.cache.size': '100',
+                                                                             'timeline.metrics.cluster.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
+                                                                             'timeline.metrics.host.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
+                                                                             'timeline.metrics.service.handler.thread.count': '20',
+                                                                             'timeline.metrics.service.operation.mode': 'distributed',
+                                                                             'timeline.metrics.service.watcher.disabled': 'true',
+                                                                             'timeline.metrics.service.webapp.address': '0.0.0.0:6188'}},
+                  'hadoop-env': {'properties': {'hdfs_user': 'hdfs',
+                                                                                 'proxyuser_group': 'users'}}}
+
+    self.serviceAdvisor.getServiceConfigurationRecommendations(configurations, clusterData, services1, hosts)
+    self.assertEquals(configurations, expected)
+
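+    # Repeat with a single METRICS_COLLECTOR host; phoenix.coprocessor.maxMetaDataCacheSize
+    # is additionally expected among the ams-hbase-site recommendations.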
+    services1 = {
+      "services": [
+        {
+          "StackServices": {
+            "service_name": "HDFS"
+          }, "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "NAMENODE",
+              "hostnames": ["c6401.ambari.apache.org"]
+            }
+          }
+        ]
+        },
+        {
+          "StackServices": {
+            "service_name": "AMBARI_METRICS"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "component_name": "METRICS_COLLECTOR",
+                "hostnames": ["c6401.ambari.apache.org"]
+              }
+            }, {
+              "StackServiceComponents": {
+                "component_name": "METRICS_MONITOR",
+                "hostnames": ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]
+              }
+            }
+          ]
+        }],
+      "configurations": configurations,
+      "ambari-server-properties": {"ambari-server.user":"ambari_user"}
+    }
+    expected = {'ams-env': {'properties': {'metrics_collector_heapsize': '512'}},
+                  'ams-grafana-env': {'properties': {},
+                                                             'property_attributes': {'metrics_grafana_password': {'visible': 'false'}}},
+                  'ams-hbase-env': {'properties': {'hbase_log_dir': '/var/log/ambari-metrics-collector',
+                                                                                       'hbase_master_heapsize': '512',
+                                                                                       'hbase_master_xmn_size': '102',
+                                                                                       'hbase_regionserver_heapsize': '1024',
+                                                                                       'regionserver_xmn_size': '128'}},
+                  'ams-hbase-site': {'properties': {'hbase.cluster.distributed': 'true',
+                                                                                         'hbase.hregion.memstore.flush.size': '134217728',
+                                                                                         'hbase.regionserver.global.memstore.lowerLimit': '0.3',
+                                                                                         'hbase.regionserver.global.memstore.upperLimit': '0.35',
+                                                                                         'hbase.rootdir': '/user/ams/hbase',
+                                                                                         'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase-tmp',
+                                                                                         'hbase.zookeeper.property.clientPort': '2181',
+                                                                                         'hfile.block.cache.size': '0.3',
+                                                                                         'phoenix.coprocessor.maxMetaDataCacheSize': '20480000'}},
+                  'ams-site': {'properties': {'timeline.metrics.cache.commit.interval': '10',
+                                                                             'timeline.metrics.cache.size': '100',
+                                                                             'timeline.metrics.cluster.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
+                                                                             'timeline.metrics.host.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
+                                                                             'timeline.metrics.service.handler.thread.count': '20',
+                                                                             'timeline.metrics.service.operation.mode': 'distributed',
+                                                                             'timeline.metrics.service.watcher.disabled': 'true',
+                                                                             'timeline.metrics.service.webapp.address': '0.0.0.0:6188'}},
+                  'hadoop-env': {'properties': {'hdfs_user': 'hdfs',
+                                                                                 'proxyuser_group': 'users'}}}
+    self.serviceAdvisor.getServiceConfigurationRecommendations(configurations, clusterData, services1, hosts)
+    self.assertEquals(configurations, expected)
+
+
+  def test_validateAmsSiteConfigurations(self):
+    configurations = {
+      "hdfs-site": {
+        "properties": {
+          'dfs.datanode.data.dir': "/hadoop/data"
+        }
+      },
+      "core-site": {
+        "properties": {
+          "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020"
+        }
+      },
+      "ams-site": {
+        "properties": {
+          "timeline.metrics.service.operation.mode": "embedded"
+        }
+      }
+    }
+    recommendedDefaults = {
+      'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
+      'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase',
+      'hbase.cluster.distributed': 'false'
+    }
+    properties = {
+      'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
+      'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
+      'hbase.cluster.distributed': 'false',
+      'timeline.metrics.service.operation.mode' : 'embedded'
+    }
+    host1 = {
+      "href" : "/api/v1/hosts/host1",
+      "Hosts" : {
+        "cpu_count" : 1,
+        "host_name" : "host1",
+        "os_arch" : "x86_64",
+        "os_type" : "centos6",
+        "ph_cpu_count" : 1,
+        "public_host_name" : "host1",
+        "rack_info" : "/default-rack",
+        "total_mem" : 2097152,
+        "disk_info": [
+          {
+            "available": str(15<<30), # 15 GB
+            "type": "ext4",
+            "mountpoint": "/"
+          }
+        ]
+      }
+    }
+    host2 = {
+      "href" : "/api/v1/hosts/host2",
+      "Hosts" : {
+        "cpu_count" : 1,
+        "host_name" : "host2",
+        "os_arch" : "x86_64",
+        "os_type" : "centos6",
+        "ph_cpu_count" : 1,
+        "public_host_name" : "host2",
+        "rack_info" : "/default-rack",
+        "total_mem" : 2097152,
+        "disk_info": [
+          {
+            "available": str(15<<30), # 15 GB
+            "type": "ext4",
+            "mountpoint": "/"
+          }
+        ]
+      }
+    }
+
+    hosts = {
+      "items" : [
+        host1, host2
+      ]
+    }
+
+    services = {
+      "services":  [
+        {
+          "StackServices": {
+            "service_name": "AMBARI_METRICS"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "component_name": "METRICS_COLLECTOR",
+                "hostnames": ["host1", "host2"]
+              }
+            }, {
+              "StackServiceComponents": {
+                "component_name": "METRICS_MONITOR",
+                "hostnames": ["host1", "host2"]
+              }
+            }
+          ]
+        },
+        {
+          "StackServices": {
+            "service_name": "HDFS"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "component_name": "DATANODE",
+                "hostnames": ["host1"]
+              }
+            }
+          ]
+        }
+      ],
+      "configurations": configurations
+    }
+    # Only 1 partition with enough disk space, so no disk warnings; but with two
+    # collectors the 'embedded' operation mode should be flagged as an ERROR.
+    res = self.serviceAdvisor.getAMBARI_METRICSValidator().validateAmsSiteConfigurationsFromHDP206(properties, recommendedDefaults, configurations, services, hosts)
+    expected = [{'config-name': 'timeline.metrics.service.operation.mode',
+                    'config-type': 'ams-site',
+                    'level': 'ERROR',
+                    'message': "Correct value should be 'distributed' for clusters with more then 1 Metrics collector",
+                    'type': 'configuration'}]
+    self.assertEquals(res, expected)
+
+
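+    # With a single METRICS_COLLECTOR, 'embedded' mode is valid: no issues expected.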
+    services = {
+      "services":  [
+        {
+          "StackServices": {
+            "service_name": "AMBARI_METRICS"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "component_name": "METRICS_COLLECTOR",
+                "hostnames": ["host1"]
+              }
+            }, {
+              "StackServiceComponents": {
+                "component_name": "METRICS_MONITOR",
+                "hostnames": ["host1"]
+              }
+            }
+          ]
+        },
+        {
+          "StackServices": {
+            "service_name": "HDFS"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "component_name": "DATANODE",
+                "hostnames": ["host1"]
+              }
+            }
+          ]
+        }
+      ],
+      "configurations": configurations
+    }
+    res = self.serviceAdvisor.getAMBARI_METRICSValidator().validateAmsSiteConfigurationsFromHDP206(properties, recommendedDefaults, configurations, services, hosts)
+    expected = []
+    self.assertEquals(res, expected)
+
+  def test_validateAmsHbaseSiteConfigurations(self):
+    configurations = {
+      "hdfs-site": {
+        "properties": {
+          'dfs.datanode.data.dir': "/hadoop/data"
+        }
+      },
+      "core-site": {
+        "properties": {
+          "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020"
+        }
+      },
+      "ams-site": {
+        "properties": {
+          "timeline.metrics.service.operation.mode": "embedded"
+        }
+      }
+    }
+
+    recommendedDefaults = {
+      'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
+      'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase',
+      'hbase.cluster.distributed': 'false'
+    }
+    properties = {
+      'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
+      'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
+      'hbase.cluster.distributed': 'false'
+    }
+    host = {
+      "href" : "/api/v1/hosts/host1",
+      "Hosts" : {
+        "cpu_count" : 1,
+        "host_name" : "host1",
+        "os_arch" : "x86_64",
+        "os_type" : "centos6",
+        "ph_cpu_count" : 1,
+        "public_host_name" : "host1",
+        "rack_info" : "/default-rack",
+        "total_mem" : 2097152,
+        "disk_info": [
+          {
+            "available": str(15<<30), # 15 GB
+            "type": "ext4",
+            "mountpoint": "/"
+          }
+        ]
+      }
+    }
+
+    hosts = {
+      "items" : [
+        host
+      ]
+    }
+
+    services = {
+      "services":  [
+        {
+          "StackServices": {
+            "service_name": "AMBARI_METRICS"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "component_name": "METRICS_COLLECTOR",
+                "hostnames": ["host1"]
+              }
+            }, {
+              "StackServiceComponents": {
+                "component_name": "METRICS_MONITOR",
+                "hostnames": ["host1"]
+              }
+            }
+          ]
+        },
+        {
+          "StackServices": {
+            "service_name": "HDFS"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "component_name": "DATANODE",
+                "hostnames": ["host1"]
+              }
+            }
+          ]
+        }
+      ],
+      "configurations": configurations
+    }
+
+    # only 1 partition, enough disk space, no warnings
+    res = self.serviceAdvisor.getAMBARI_METRICSValidator().validateAmsHbaseSiteConfigurationsFromHDP206(properties, recommendedDefaults, configurations, services, hosts)
+    expected = []
+    self.assertEquals(res, expected)
+
+
+    # 1 partition, not enough disk space
+    host['Hosts']['disk_info'] = [
+      {
+        "available" : '1',
+        "type" : "ext4",
+        "mountpoint" : "/"
+      }
+    ]
+    res = self.serviceAdvisor.getAMBARI_METRICSValidator().validateAmsHbaseSiteConfigurationsFromHDP206(properties, recommendedDefaults, configurations, services, hosts)
+    expected = [
+      {'config-name': 'hbase.rootdir',
+       'config-type': 'ams-hbase-site',
+       'level': 'WARN',
+       'message': 'Ambari Metrics disk space requirements not met. '
+                  '\nRecommended disk space for partition / is 10G',
+       'type': 'configuration'
+      }
+    ]
+    self.assertEquals(res, expected)
+
+    # 2 partitions
+    host['Hosts']['disk_info'] = [
+      {
+        "available": str(15<<30), # 15 GB
+        "type" : "ext4",
+        "mountpoint" : "/grid/0"
+      },
+      {
+        "available" : str(15<<30), # 15 GB
+        "type" : "ext4",
+        "mountpoint" : "/"
+      }
+    ]
+    recommendedDefaults = {
+      'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
+      'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase',
+      'hbase.cluster.distributed': 'false'
+    }
+    properties = {
+      'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
+      'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
+      'hbase.cluster.distributed': 'false'
+    }
+    res = self.serviceAdvisor.getAMBARI_METRICSValidator().validateAmsHbaseSiteConfigurationsFromHDP206(properties, recommendedDefaults, configurations, services, hosts)
+    expected = []
+    self.assertEquals(res, expected)
+
+    # dfs.dir & hbase.rootdir crosscheck + root partition + hbase.rootdir == hbase.tmp.dir warnings
+    properties = {
+      'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
+      'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
+      'hbase.cluster.distributed': 'false'
+    }
+
+    res = self.serviceAdvisor.getAMBARI_METRICSValidator().validateAmsHbaseSiteConfigurationsFromHDP206(properties, recommendedDefaults, configurations, services, hosts)
+    expected = [
+      {
+        'config-name': 'hbase.rootdir',
+        'config-type': 'ams-hbase-site',
+        'level': 'WARN',
+        'message': 'It is not recommended to use root partition for hbase.rootdir',
+        'type': 'configuration'
+      },
+      {
+        'config-name': 'hbase.tmp.dir',
+        'config-type': 'ams-hbase-site',
+        'level': 'WARN',
+        'message': 'Consider not using / partition for storing metrics temporary data. '
+                   '/ partition is already used as hbase.rootdir to store metrics data',
+        'type': 'configuration'
+      },
+      {
+        'config-name': 'hbase.rootdir',
+        'config-type': 'ams-hbase-site',
+        'level': 'WARN',
+        'message': 'Consider not using / partition for storing metrics data. '
+                   '/ is already used by datanode to store HDFS data',
+        'type': 'configuration'
+      }
+    ]
+    self.assertEquals(res, expected)
+
+    # incorrect hbase.rootdir in distributed mode
+    properties = {
+      'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
+      'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
+      'hbase.cluster.distributed': 'false'
+    }
+    configurations['ams-site']['properties']['timeline.metrics.service.operation.mode'] = 'distributed'
+    res = self.serviceAdvisor.getAMBARI_METRICSValidator().validateAmsHbaseSiteConfigurationsFromHDP206(properties, recommendedDefaults, configurations, services, hosts)
+    expected = [
+      {
+        'config-name': 'hbase.rootdir',
+        'config-type': 'ams-hbase-site',
+        'level': 'WARN',
+        'message': 'In distributed mode hbase.rootdir should point to HDFS.',
+        'type': 'configuration'
+      },
+      {
+        'config-name': 'hbase.cluster.distributed',
+        'config-type': 'ams-hbase-site',
+        'level': 'ERROR',
+        'message': 'hbase.cluster.distributed property should be set to true for distributed mode',
+        'type': 'configuration'
+      }
+    ]
+    self.assertEquals(res, expected)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/0f32765d/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
index 65b23b0..38d6ecd 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
@@ -2315,214 +2315,6 @@ class TestHDP206StackAdvisor(TestCase):
 
 
 
-  def test_recommendAmsConfigurations(self):
-    configurations = {
-      "hadoop-env": {
-        "properties": {
-          "hdfs_user": "hdfs",
-          "proxyuser_group": "users"
-        }
-      }
-    }
-
-    hosts = {
-      "items": [
-        {
-          "href": "/api/v1/hosts/host1",
-          "Hosts": {
-            "cpu_count": 1,
-            "host_name": "c6401.ambari.apache.org",
-            "os_arch": "x86_64",
-            "os_type": "centos6",
-            "ph_cpu_count": 1,
-            "public_host_name": "public.c6401.ambari.apache.org",
-            "rack_info": "/default-rack",
-            "total_mem": 2097152,
-            "disk_info": [{
-              "size": '80000000',
-              "mountpoint": "/"
-            }]
-          }
-        },
-        {
-          "href": "/api/v1/hosts/host2",
-          "Hosts": {
-            "cpu_count": 1,
-            "host_name": "c6402.ambari.apache.org",
-            "os_arch": "x86_64",
-            "os_type": "centos6",
-            "ph_cpu_count": 1,
-            "public_host_name": "public.c6402.ambari.apache.org",
-            "rack_info": "/default-rack",
-            "total_mem": 1048576,
-            "disk_info": [{
-              "size": '800000000',
-              "mountpoint": "/"
-            }]
-          }
-        }
-      ]}
-
-
-    services1 = {
-      "services": [
-        {
-          "StackServices": {
-            "service_name": "HDFS"
-          }, "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "NAMENODE",
-              "hostnames": ["c6401.ambari.apache.org"]
-            }
-          }
-        ]
-        },
-        {
-          "StackServices": {
-            "service_name": "AMBARI_METRICS"
-          },
-          "components": [
-            {
-              "StackServiceComponents": {
-                "component_name": "METRICS_COLLECTOR",
-                "hostnames": ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]
-              }
-            }, {
-              "StackServiceComponents": {
-                "component_name": "METRICS_MONITOR",
-                "hostnames": ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]
-              }
-            }
-          ]
-        }],
-      "configurations": configurations,
-      "ambari-server-properties": {"ambari-server.user":"ambari_user"}
-    }
-
-    clusterData = {
-      "totalAvailableRam": 2048
-    }
-
-    expected = {'ams-env': {'properties': {'metrics_collector_heapsize': '512'}},
-                  'ams-grafana-env': {'properties': {},
-                                                             'property_attributes': {'metrics_grafana_password': {'visible': 'false'}}},
-                  'ams-hbase-env': {'properties': {'hbase_log_dir': '/var/log/ambari-metrics-collector',
-                                                                                       'hbase_master_heapsize': '512',
-                                                                                       'hbase_master_xmn_size': '102',
-                                                                                       'hbase_regionserver_heapsize': '1024',
-                                                                                       'regionserver_xmn_size': '128'}},
-                  'ams-hbase-site': {'properties': {'hbase.cluster.distributed': 'true',
-                                                                                         'hbase.hregion.memstore.flush.size': '134217728',
-                                                                                         'hbase.regionserver.global.memstore.lowerLimit': '0.3',
-                                                                                         'hbase.regionserver.global.memstore.upperLimit': '0.35',
-                                                                                         'hbase.rootdir': '/user/ams/hbase',
-                                                                                         'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase-tmp',
-                                                                                         'hbase.zookeeper.property.clientPort': '2181',
-                                                                                         'hfile.block.cache.size': '0.3'}},
-                  'ams-site': {'properties': {'timeline.metrics.cache.commit.interval': '10',
-                                                                             'timeline.metrics.cache.size': '100',
-                                                                             'timeline.metrics.cluster.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
-                                                                             'timeline.metrics.host.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
-                                                                             'timeline.metrics.service.handler.thread.count': '20',
-                                                                             'timeline.metrics.service.operation.mode': 'distributed',
-                                                                             'timeline.metrics.service.watcher.disabled': 'true',
-                                                                             'timeline.metrics.service.webapp.address': '0.0.0.0:6188'}},
-                  'hadoop-env': {'properties': {'hdfs_user': 'hdfs',
-                                                                                 'proxyuser_group': 'users'}}}
-
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services1, hosts)
-    self.assertEquals(configurations, expected)
-
-    services1 = {
-      "services": [
-        {
-          "StackServices": {
-            "service_name": "HDFS"
-          }, "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "NAMENODE",
-              "hostnames": ["c6401.ambari.apache.org"]
-            }
-          }
-        ]
-        },
-        {
-          "StackServices": {
-            "service_name": "AMBARI_METRICS"
-          },
-          "components": [
-            {
-              "StackServiceComponents": {
-                "component_name": "METRICS_COLLECTOR",
-                "hostnames": ["c6401.ambari.apache.org"]
-              }
-            }, {
-              "StackServiceComponents": {
-                "component_name": "METRICS_MONITOR",
-                "hostnames": ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]
-              }
-            }
-          ]
-        }],
-      "configurations": configurations,
-      "ambari-server-properties": {"ambari-server.user":"ambari_user"}
-    }
-    expected = {'ams-env': {'properties': {'metrics_collector_heapsize': '512'}},
-                  'ams-grafana-env': {'properties': {},
-                                                             'property_attributes': {'metrics_grafana_password': {'visible': 'false'}}},
-                  'ams-hbase-env': {'properties': {'hbase_log_dir': '/var/log/ambari-metrics-collector',
-                                                                                       'hbase_master_heapsize': '512',
-                                                                                       'hbase_master_xmn_size': '102',
-                                                                                       'hbase_regionserver_heapsize': '1024',
-                                                                                       'regionserver_xmn_size': '128'}},
-                  'ams-hbase-site': {'properties': {'hbase.cluster.distributed': 'true',
-                                                                                         'hbase.hregion.memstore.flush.size': '134217728',
-                                                                                         'hbase.regionserver.global.memstore.lowerLimit': '0.3',
-                                                                                         'hbase.regionserver.global.memstore.upperLimit': '0.35',
-                                                                                         'hbase.rootdir': '/user/ams/hbase',
-                                                                                         'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase-tmp',
-                                                                                         'hbase.zookeeper.property.clientPort': '2181',
-                                                                                         'hfile.block.cache.size': '0.3',
-                                                                                         'phoenix.coprocessor.maxMetaDataCacheSize': '20480000'}},
-                  'ams-site': {'properties': {'timeline.metrics.cache.commit.interval': '10',
-                                                                             'timeline.metrics.cache.size': '100',
-                                                                             'timeline.metrics.cluster.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
-                                                                             'timeline.metrics.host.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
-                                                                             'timeline.metrics.service.handler.thread.count': '20',
-                                                                             'timeline.metrics.service.operation.mode': 'distributed',
-                                                                             'timeline.metrics.service.watcher.disabled': 'true',
-                                                                             'timeline.metrics.service.webapp.address': '0.0.0.0:6188'}},
-                  'hadoop-env': {'properties': {'hdfs_user': 'hdfs',
-                                                                                 'proxyuser_group': 'users'}}}
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services1, hosts)
-    self.assertEquals(configurations, expected)
-
-  def test_getHostNamesWithComponent(self):
-
-    services = {
-      "services":  [
-        {
-          "StackServices": {
-            "service_name": "SERVICE"
-          },
-          "components": [
-            {
-              "StackServiceComponents": {
-                "component_name": "COMPONENT",
-                "hostnames": ["host1","host2","host3"]
-              }
-            }
-          ]
-        }
-      ],
-      "configurations": {}
-    }
-
-    result = self.stackAdvisor.getHostNamesWithComponent("SERVICE","COMPONENT", services)
-    expected = ["host1","host2","host3"]
-    self.assertEquals(result, expected)
 
 
   def test_getZKHostPortString(self):
@@ -2908,374 +2700,6 @@ class TestHDP206StackAdvisor(TestCase):
     validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, hosts)
     self.assertEquals(validation_problems, expected)
 
-  def test_validateAmsSiteConfigurations(self):
-    configurations = {
-      "hdfs-site": {
-        "properties": {
-          'dfs.datanode.data.dir': "/hadoop/data"
-        }
-      },
-      "core-site": {
-        "properties": {
-          "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020"
-        }
-      },
-      "ams-site": {
-        "properties": {
-          "timeline.metrics.service.operation.mode": "embedded"
-        }
-      }
-    }
-    recommendedDefaults = {
-      'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
-      'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase',
-      'hbase.cluster.distributed': 'false'
-    }
-    properties = {
-      'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
-      'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
-      'hbase.cluster.distributed': 'false',
-      'timeline.metrics.service.operation.mode' : 'embedded'
-    }
-    host1 = {
-      "href" : "/api/v1/hosts/host1",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "host_name" : "host1",
-        "os_arch" : "x86_64",
-        "os_type" : "centos6",
-        "ph_cpu_count" : 1,
-        "public_host_name" : "host1",
-        "rack_info" : "/default-rack",
-        "total_mem" : 2097152,
-        "disk_info": [
-          {
-            "available": str(15<<30), # 15 GB
-            "type": "ext4",
-            "mountpoint": "/"
-          }
-        ]
-      }
-    }
-    host2 = {
-      "href" : "/api/v1/hosts/host2",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "host_name" : "host2",
-        "os_arch" : "x86_64",
-        "os_type" : "centos6",
-        "ph_cpu_count" : 1,
-        "public_host_name" : "host2",
-        "rack_info" : "/default-rack",
-        "total_mem" : 2097152,
-        "disk_info": [
-          {
-            "available": str(15<<30), # 15 GB
-            "type": "ext4",
-            "mountpoint": "/"
-          }
-        ]
-      }
-    }
-
-    hosts = {
-      "items" : [
-        host1, host2
-      ]
-    }
-
-    services = {
-      "services":  [
-        {
-          "StackServices": {
-            "service_name": "AMBARI_METRICS"
-          },
-          "components": [
-            {
-              "StackServiceComponents": {
-                "component_name": "METRICS_COLLECTOR",
-                "hostnames": ["host1", "host2"]
-              }
-            }, {
-              "StackServiceComponents": {
-                "component_name": "METRICS_MONITOR",
-                "hostnames": ["host1", "host2"]
-              }
-            }
-          ]
-        },
-        {
-          "StackServices": {
-            "service_name": "HDFS"
-          },
-          "components": [
-            {
-              "StackServiceComponents": {
-                "component_name": "DATANODE",
-                "hostnames": ["host1"]
-              }
-            }
-          ]
-        }
-      ],
-      "configurations": configurations
-    }
-    # only 1 partition, enough disk space, no warnings
-    res = self.stackAdvisor.validateAmsSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
-    expected = [{'config-name': 'timeline.metrics.service.operation.mode',
-                    'config-type': 'ams-site',
-                    'level': 'ERROR',
-                    'message': "Correct value should be 'distributed' for clusters with more then 1 Metrics collector",
-                    'type': 'configuration'}]
-    self.assertEquals(res, expected)
-
-
-    services = {
-      "services":  [
-        {
-          "StackServices": {
-            "service_name": "AMBARI_METRICS"
-          },
-          "components": [
-            {
-              "StackServiceComponents": {
-                "component_name": "METRICS_COLLECTOR",
-                "hostnames": ["host1"]
-              }
-            }, {
-              "StackServiceComponents": {
-                "component_name": "METRICS_MONITOR",
-                "hostnames": ["host1"]
-              }
-            }
-          ]
-        },
-        {
-          "StackServices": {
-            "service_name": "HDFS"
-          },
-          "components": [
-            {
-              "StackServiceComponents": {
-                "component_name": "DATANODE",
-                "hostnames": ["host1"]
-              }
-            }
-          ]
-        }
-      ],
-      "configurations": configurations
-    }
-    res = self.stackAdvisor.validateAmsSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
-    expected = []
-    self.assertEquals(res, expected)
-
-  def test_validateAmsHbaseSiteConfigurations(self):
-    configurations = {
-      "hdfs-site": {
-        "properties": {
-          'dfs.datanode.data.dir': "/hadoop/data"
-        }
-      },
-      "core-site": {
-        "properties": {
-          "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020"
-        }
-      },
-      "ams-site": {
-        "properties": {
-          "timeline.metrics.service.operation.mode": "embedded"
-        }
-      }
-    }
-
-    recommendedDefaults = {
-      'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
-      'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase',
-      'hbase.cluster.distributed': 'false'
-    }
-    properties = {
-      'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
-      'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
-      'hbase.cluster.distributed': 'false'
-    }
-    host = {
-      "href" : "/api/v1/hosts/host1",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "host_name" : "host1",
-        "os_arch" : "x86_64",
-        "os_type" : "centos6",
-        "ph_cpu_count" : 1,
-        "public_host_name" : "host1",
-        "rack_info" : "/default-rack",
-        "total_mem" : 2097152,
-        "disk_info": [
-          {
-            "available": str(15<<30), # 15 GB
-            "type": "ext4",
-            "mountpoint": "/"
-          }
-        ]
-      }
-    }
-
-    hosts = {
-      "items" : [
-        host
-      ]
-    }
-
-    services = {
-      "services":  [
-        {
-          "StackServices": {
-            "service_name": "AMBARI_METRICS"
-          },
-          "components": [
-            {
-              "StackServiceComponents": {
-                "component_name": "METRICS_COLLECTOR",
-                "hostnames": ["host1"]
-              }
-            }, {
-              "StackServiceComponents": {
-                "component_name": "METRICS_MONITOR",
-                "hostnames": ["host1"]
-              }
-            }
-          ]
-        },
-        {
-          "StackServices": {
-            "service_name": "HDFS"
-          },
-          "components": [
-            {
-              "StackServiceComponents": {
-                "component_name": "DATANODE",
-                "hostnames": ["host1"]
-              }
-            }
-          ]
-        }
-      ],
-      "configurations": configurations
-    }
-
-    # only 1 partition, enough disk space, no warnings
-    res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
-    expected = []
-    self.assertEquals(res, expected)
-
-
-    # 1 partition, no enough disk space
-    host['Hosts']['disk_info'] = [
-      {
-        "available" : '1',
-        "type" : "ext4",
-        "mountpoint" : "/"
-      }
-    ]
-    res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
-    expected = [
-      {'config-name': 'hbase.rootdir',
-       'config-type': 'ams-hbase-site',
-       'level': 'WARN',
-       'message': 'Ambari Metrics disk space requirements not met. '
-                  '\nRecommended disk space for partition / is 10G',
-       'type': 'configuration'
-      }
-    ]
-    self.assertEquals(res, expected)
-
-    # 2 partitions
-    host['Hosts']['disk_info'] = [
-      {
-        "available": str(15<<30), # 15 GB
-        "type" : "ext4",
-        "mountpoint" : "/grid/0"
-      },
-      {
-        "available" : str(15<<30), # 15 GB
-        "type" : "ext4",
-        "mountpoint" : "/"
-      }
-    ]
-    recommendedDefaults = {
-      'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
-      'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase',
-      'hbase.cluster.distributed': 'false'
-    }
-    properties = {
-      'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
-      'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
-      'hbase.cluster.distributed': 'false'
-    }
-    res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
-    expected = []
-    self.assertEquals(res, expected)
-
-    # dfs.dir & hbase.rootdir crosscheck + root partition + hbase.rootdir == hbase.tmp.dir warnings
-    properties = {
-      'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
-      'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
-      'hbase.cluster.distributed': 'false'
-    }
-
-    res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
-    expected = [
-      {
-        'config-name': 'hbase.rootdir',
-        'config-type': 'ams-hbase-site',
-        'level': 'WARN',
-        'message': 'It is not recommended to use root partition for hbase.rootdir',
-        'type': 'configuration'
-      },
-      {
-        'config-name': 'hbase.tmp.dir',
-        'config-type': 'ams-hbase-site',
-        'level': 'WARN',
-        'message': 'Consider not using / partition for storing metrics temporary data. '
-                   '/ partition is already used as hbase.rootdir to store metrics data',
-        'type': 'configuration'
-      },
-      {
-        'config-name': 'hbase.rootdir',
-        'config-type': 'ams-hbase-site',
-        'level': 'WARN',
-        'message': 'Consider not using / partition for storing metrics data. '
-                   '/ is already used by datanode to store HDFS data',
-        'type': 'configuration'
-      }
-    ]
-    self.assertEquals(res, expected)
-
-    # incorrect hbase.rootdir in distributed mode
-    properties = {
-      'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
-      'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
-      'hbase.cluster.distributed': 'false'
-    }
-    configurations['ams-site']['properties']['timeline.metrics.service.operation.mode'] = 'distributed'
-    res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
-    expected = [
-      {
-        'config-name': 'hbase.rootdir',
-        'config-type': 'ams-hbase-site',
-        'level': 'WARN',
-        'message': 'In distributed mode hbase.rootdir should point to HDFS.',
-        'type': 'configuration'
-      },
-      {
-        'config-name': 'hbase.cluster.distributed',
-        'config-type': 'ams-hbase-site',
-        'level': 'ERROR',
-        'message': 'hbase.cluster.distributed property should be set to true for distributed mode',
-        'type': 'configuration'
-      }
-    ]
-    self.assertEquals(res, expected)
 
   def test_validateStormSiteConfigurations(self):
     configurations = {

http://git-wip-us.apache.org/repos/asf/ambari/blob/0f32765d/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index d6b572e..6f9e18e 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -2300,517 +2300,6 @@ class TestHDP22StackAdvisor(TestCase):
     self.stackAdvisor.recommendMapReduce2Configurations(configurations, clusterData, services, hosts)
     self.assertEquals(configurations, expected)
 
-  def test_recommendAmsConfigurations(self):
-    configurations = {}
-    clusterData = {}
-
-    services = {
-      "services":  [ {
-        "StackServices": {
-          "service_name": "AMBARI_METRICS"
-        },
-        "components": [{
-          "StackServiceComponents": {
-            "component_name": "METRICS_COLLECTOR",
-            "hostnames": ["host1"]
-          }
-
-        }, {
-          "StackServiceComponents": {
-            "component_name": "METRICS_MONITOR",
-            "hostnames": ["host1"]
-          }
-
-        }]
-      }],
-      "configurations": []
-    }
-    hosts = {
-      "items": [{
-        "Hosts": {
-          "host_name": "host1",
-
-        }
-      }]
-    }
-
-    # 1-node cluster
-    expected = {
-      "ams-hbase-env": {
-        "properties": {
-          "hbase_master_xmn_size": "128",
-          "hbase_master_heapsize": "512",
-          "hbase_regionserver_heapsize": "512"
-        }
-      },
-      "ams-grafana-env": {
-        "properties" : {},
-        "property_attributes": {
-          "metrics_grafana_password": {
-            "visible": "false"
-          }
-        }
-      },
-      "ams-env": {
-        "properties": {
-          "metrics_collector_heapsize": "512",
-        }
-      },
-      "ams-hbase-site": {
-        "properties": {
-          "phoenix.coprocessor.maxMetaDataCacheSize": "20480000",
-          "hbase.regionserver.global.memstore.lowerLimit": "0.3",
-          "hbase.regionserver.global.memstore.upperLimit": "0.35",
-          "hbase.hregion.memstore.flush.size": "134217728",
-          "hfile.block.cache.size": "0.3",
-          "hbase.cluster.distributed": "false",
-          "hbase.rootdir": "file:///var/lib/ambari-metrics-collector/hbase",
-          "hbase.tmp.dir": "/var/lib/ambari-metrics-collector/hbase-tmp",
-          "hbase.zookeeper.property.clientPort": "61181",
-        }
-      },
-      "ams-site": {
-        "properties": {
-          "timeline.metrics.cluster.aggregate.splitpoints": "mem_buffered",
-          "timeline.metrics.host.aggregate.splitpoints": "mem_buffered",
-          "timeline.metrics.service.handler.thread.count": "20",
-          'timeline.metrics.service.webapp.address': '0.0.0.0:6188',
-          'timeline.metrics.service.watcher.disabled': 'false',
-          'timeline.metrics.cache.size': '100',
-          'timeline.metrics.cache.commit.interval': '10'
-        }
-      }
-    }
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations, expected)
-
-    # 100-nodes cluster, but still only 1 sink (METRICS_COLLECTOR)
-    for i in range(2, 201):
-      hosts['items'].extend([{
-        "Hosts": {
-          "host_name": "host" + str(i)
-          }
-      }])
-
-    services['services'] = [
-      {
-        "StackServices": {
-          "service_name": "AMBARI_METRICS"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "METRICS_COLLECTOR",
-              "hostnames": ["host1"]
-            }
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "METRICS_MONITOR",
-              "hostnames": ["host" + str(i) for i in range(1, 201)]
-            }
-          }
-        ]
-      }
-    ]
-
-    expected["ams-site"]['properties']['timeline.metrics.cache.size'] = '500'
-    expected["ams-site"]['properties']['timeline.metrics.cache.commit.interval'] = '7'
-    expected["ams-hbase-env"]['properties']['hbase_master_heapsize'] = '2560'
-    expected["ams-hbase-env"]['properties']['hbase_master_xmn_size'] = '448'
-    expected["ams-env"]['properties']['metrics_collector_heapsize'] = '896'
-
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations, expected)
-
-    # 200 nodes, but with HDFS and YARN services installed on all nodes
-    services['services'] = [
-      {
-        "StackServices": {
-          "service_name": "HDFS"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "NAMENODE",
-              "hostnames": ["host1"]
-            }
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "DATANODE",
-              "hostnames": ["host" + str(i) for i in range(1, 201)]
-            }
-          }
-        ]
-      },
-      {
-        "StackServices": {
-          "service_name": "YARN"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "RESOURCEMANAGER",
-              "hostnames": ["host1"]
-            }
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "NODEMANAGER",
-              "hostnames": ["host" + str(i) for i in range(1, 201)]
-            }
-          }
-        ]
-      },
-      {
-        "StackServices": {
-          "service_name": "AMBARI_METRICS"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "METRICS_COLLECTOR",
-              "hostnames": ["host1"]
-            }
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "METRICS_MONITOR",
-              "hostnames": ["host" + str(i) for i in range(1, 201)]
-            }
-          }
-        ]
-      }
-
-    ]
-    expected["ams-site"]['properties']['timeline.metrics.host.aggregate.splitpoints'] = 'dfs.FSNamesystem.FilesTotal,' \
-                                                                                        'dfs.datanode.WritesFromRemoteClient,' \
-                                                                                        'ipc.IPC.numCallsInReplicationQueue,' \
-                                                                                        'mapred.ShuffleMetrics.ShuffleOutputsFailed,' \
-                                                                                        'mem_buffered,' \
-                                                                                        'read_count,' \
-                                                                                        'regionserver.Server.percentFilesLocal,' \
-                                                                                        'rpcdetailed.rpcdetailed.RegisterNodeManagerNumOps,' \
-                                                                                        'sdisk_vdb_write_count'
-    expected["ams-site"]['properties']['timeline.metrics.cluster.aggregate.splitpoints'] = 'mem_total'
-
-    expected["ams-site"]['properties']['timeline.metrics.cache.size'] = '600'
-    expected["ams-site"]['properties']['timeline.metrics.cache.commit.interval'] = '6'
-    expected["ams-hbase-env"]['properties']['hbase_master_heapsize'] = '6656'
-    expected["ams-hbase-env"]['properties']['hbase_master_xmn_size'] = '1088'
-    expected["ams-env"]['properties']['metrics_collector_heapsize'] = '2176'
-
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations, expected)
-
-    # Test splitpoints, AMS embedded mode
-    services['changed-configurations'] = [
-      {
-        "type": "ams-hbase-env",
-        "name": "hbase_master_heapsize",
-        "old_value": "1024"
-      }
-    ]
-
-    services['configurations'] = {
-      'core-site': {'properties': {}},
-      'ams-site': {'properties': {}},
-      'ams-hbase-site': {'properties': {}},
-      'ams-hbase-env': {'properties': {}}
-    }
-
-    # Embedded mode, 512m master heapsize, no splitpoints recommended
-    services["configurations"]['ams-hbase-env']['properties']['hbase_master_heapsize'] = '512'
-    services["configurations"]['ams-hbase-site']['properties']['hbase.regionserver.global.memstore.upperLimit'] = '0.4'
-    services["configurations"]['ams-hbase-site']['properties']['hbase.hregion.memstore.flush.size'] = '134217728'
-
-    expected['ams-site']['properties']['timeline.metrics.host.aggregate.splitpoints'] = 'mem_total'
-    expected['ams-site']['properties']['timeline.metrics.cluster.aggregate.splitpoints'] = 'mem_total'
-    expected['ams-hbase-env']['properties']['hbase_master_heapsize'] = '512'
-
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations, expected)
-
-    # Embedded mode, 4096m master heapsize, some splitpoints recommended
-    services["configurations"]['ams-hbase-env']['properties']['hbase_master_heapsize'] = '4096'
-    expected['ams-site']['properties']['timeline.metrics.host.aggregate.splitpoints'] = 'dfs.namenode.BlockReportAvgTime,' \
-                                                                                        'master.AssignmentManger.Assign_mean,' \
-                                                                                        'regionserver.Server.Append_median,' \
-                                                                                        'rpcdetailed.rpcdetailed.client.CheckAccessNumOps'
-    expected['ams-site']['properties']['timeline.metrics.cluster.aggregate.splitpoints'] = 'mem_total'
-    expected['ams-hbase-env']['properties']['hbase_master_heapsize'] = '4096'
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations, expected)
-
-
-    # Embedded mode, 8192m master heapsize, more splitpoints recommended
-    services["configurations"]['ams-hbase-env']['properties']['hbase_master_heapsize'] = '8192'
-    expected['ams-hbase-env']['properties']['hbase_master_heapsize'] = '8192'
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(len(configurations['ams-site']['properties']['timeline.metrics.host.aggregate.splitpoints'].split(',')), 13)
-    self.assertEquals(len(configurations['ams-site']['properties']['timeline.metrics.cluster.aggregate.splitpoints'].split(',')), 2)
-
-    # Test splitpoints, AMS distributed mode
-    services['changed-configurations'] = [
-      {
-        "type": "ams-hbase-env",
-        "name": "hbase_regionserver_heapsize",
-        "old_value": "512"
-      }
-    ]
-    services["configurations"]['ams-site']['properties']['timeline.metrics.service.operation.mode'] = 'distributed'
-    services["configurations"]["core-site"]["properties"]["fs.defaultFS"] = 'hdfs://host1:8020'
-    expected['ams-hbase-site']['properties']['hbase.cluster.distributed'] = 'true'
-    expected['ams-hbase-site']['properties']['hbase.rootdir'] = '/user/ams/hbase'
-    expected['ams-hbase-site']['properties']['hbase.zookeeper.property.clientPort'] = '2181'
-    expected['ams-hbase-env']['properties']['hbase_master_heapsize'] = '512'
-    expected['ams-hbase-site']['properties']['dfs.client.read.shortcircuit'] = 'true'
-
-    # Distributed mode, low memory, no splitpoints recommended
-    expected['ams-site']['properties']['timeline.metrics.host.aggregate.splitpoints'] = 'mem_total'
-    expected['ams-site']['properties']['timeline.metrics.cluster.aggregate.splitpoints'] = 'mem_total'
-    expected['ams-hbase-env']['properties']['hbase_regionserver_heapsize'] = '6656'
-    expected["ams-hbase-env"]['properties']['hbase_master_xmn_size'] = '102'
-    expected['ams-hbase-env']['properties']['regionserver_xmn_size'] = '1024'
-    expected['ams-site']['properties']['timeline.metrics.service.watcher.disabled'] = 'true'
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations, expected)
-
-    # Distributed mode, more memory, more splitpoints recommended
-    services["configurations"]['ams-hbase-env']['properties']['hbase_regionserver_heapsize'] = '8192'
-    expected['ams-hbase-env']['properties']['hbase_regionserver_heapsize'] = '8192'
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(len(configurations['ams-site']['properties']['timeline.metrics.host.aggregate.splitpoints'].split(',')), 13)
-    self.assertEquals(len(configurations['ams-site']['properties']['timeline.metrics.cluster.aggregate.splitpoints'].split(',')), 2)
-
-    # 2000-nodes cluster
-    for i in range(202, 2001):
-        hosts['items'].extend([{
-            "Hosts": {
-                "host_name": "host" + str(i)
-            }
-        }])
-
-    services['services'] = [
-        {
-            "StackServices": {
-                "service_name": "AMBARI_METRICS"
-            },
-            "components": [
-                {
-                    "StackServiceComponents": {
-                        "component_name": "METRICS_COLLECTOR",
-                        "hostnames": ["host1"]
-                    }
-                },
-                {
-                    "StackServiceComponents": {
-                        "component_name": "METRICS_MONITOR",
-                        "hostnames": ["host" + str(i) for i in range(1, 2001)]
-                    }
-                }
-            ]
-        }
-    ]
-
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations["ams-site"]['properties']['timeline.metrics.cache.size'], '700')
-    self.assertEquals(configurations["ams-site"]['properties']['timeline.metrics.cache.commit.interval'], '5')
-
-    # 500 Nodes with HDFS, YARN, HIVE, STORM, HBASE, KAFKA, AMS
-    node_count = 500
-    hosts = {
-      "items": []
-    }
-    for i in range(1, node_count):
-      hosts['items'].extend([{
-        "Hosts": {
-          "host_name": "host" + str(i)
-          }
-      }])
-
-    services['services'] = [
-      {
-        "StackServices": {
-          "service_name": "HDFS"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "NAMENODE",
-              "hostnames": ["host1"]
-            }
-          } ,
-          {
-            "StackServiceComponents": {
-              "component_name": "SECONDARY_NAMENODE",
-              "hostnames": ["host2"]
-            }
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "DATANODE",
-              "hostnames": ["host" + str(i) for i in range(6, node_count + 1)]
-            }
-          }
-        ]
-      },
-      {
-        "StackServices": {
-          "service_name": "HBASE"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "HBASE_MASTER",
-              "hostnames": ["host3"]
-            }
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "HBASE_REGIONSERVER",
-              "hostnames": ["host" + str(i) for i in range(6, node_count + 1)]
-            }
-          }
-        ]
-      },
-      {
-        "StackServices": {
-          "service_name": "YARN"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "RESOURCEMANAGER",
-              "hostnames": ["host4"]
-            }
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "NODEMANAGER",
-              "hostnames": ["host" + str(i) for i in range(6, node_count + 1)]
-            }
-          }
-        ]
-      },
-      {
-        "StackServices": {
-          "service_name": "HIVE"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "HIVE_METASTORE",
-              "hostnames": ["host3"]
-            }
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "HIVE_SERVER",
-              "hostnames": ["host3"]
-            }
-          }
-        ]
-      },
-      {
-        "StackServices": {
-          "service_name": "STORM"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "NIMBUS",
-              "hostnames": ["host" + str(i) for i in range(1, 6)]
-            }
-          }
-        ]
-      },
-      {
-        "StackServices": {
-          "service_name": "KAFKA"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "KAFKA_BROKER",
-              "hostnames": ["host" + str(i) for i in range(1, 6)]
-            }
-          }
-        ]
-      },
-      {
-        "StackServices": {
-          "service_name": "AMBARI_METRICS"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "METRICS_COLLECTOR",
-              "hostnames": ["host6"]
-            }
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "METRICS_MONITOR",
-              "hostnames": ["host" + str(i) for i in range(6, node_count + 1)]
-            }
-          }
-        ]
-      }
-    ]
-
-    services['configurations'] = {
-      'core-site': {'properties': {}},
-      'ams-site': {'properties': {}},
-      'ams-hbase-site': {'properties': {}},
-      'ams-hbase-env': {'properties': {}}
-    }
-    services["configurations"]['ams-site']['properties']['timeline.metrics.service.operation.mode'] = 'distributed'
-
-    expected['ams-hbase-site']['properties']['hbase.cluster.distributed'] = 'true'
-    expected['ams-hbase-site']['properties']['hbase.rootdir'] = '/user/ams/hbase'
-    expected['ams-hbase-site']['properties']['hbase.zookeeper.property.clientPort'] = '2181'
-
-    expected["ams-site"]['properties']['timeline.metrics.host.aggregate.splitpoints'] = 'default.General.active_calls_api_get_all_databases,' \
-                                                                                        'default.General.api_get_database_mean,' \
-                                                                                        'default.General.gc.PS-MarkSweep.count,' \
-                                                                                        'dfs.FsVolume.TotalDataFileIos,' \
-                                                                                        'disk_free,' \
-                                                                                        'jvm.JvmMetrics.MemHeapMaxM,' \
-                                                                                        'kafka.network.RequestMetrics.RemoteTimeMs.request.Metadata.75percentile,' \
-                                                                                        'kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Update.Metadata.mean,' \
-                                                                                        'load_one,master.FileSystem.MetaHlogSplitTime_75th_percentile,' \
-                                                                                        'metricssystem.MetricsSystem.NumActiveSources,' \
-                                                                                        'regionserver.Server.Append_95th_percentile,' \
-                                                                                        'regionserver.Server.blockCacheEvictionCount,' \
-                                                                                        'rpc.rpc.client.SentBytes,' \
-                                                                                        'sdisk_vda1_write_bytes'
-    expected["ams-site"]['properties']['timeline.metrics.cluster.aggregate.splitpoints'] = 'ipc.IPC.authorizationSuccesses,' \
-                                                                                           'metricssystem.MetricsSystem.PublishNumOps'
-
-    expected["ams-site"]['properties']['timeline.metrics.cache.size'] = '700'
-    expected["ams-site"]['properties']['timeline.metrics.cache.commit.interval'] = '5'
-    expected["ams-site"]['properties']['timeline.metrics.service.resultset.fetchSize'] = '5000'
-    expected["ams-site"]['properties']['phoenix.query.maxGlobalMemoryPercentage'] = '30'
-
-    expected["ams-env"]['properties']['metrics_collector_heapsize'] = '7040'
-
-    expected["ams-hbase-env"]['properties']['hbase_master_heapsize'] = '512'
-    expected["ams-hbase-env"]['properties']['hbase_master_xmn_size'] = '102'
-
-    expected["ams-hbase-env"]['properties']['hbase_regionserver_heapsize'] = '21120'
-    expected["ams-hbase-env"]['properties']['regionserver_xmn_size'] = '3200'
-
-    expected["ams-hbase-site"]['properties']['phoenix.query.maxGlobalMemoryPercentage'] = '20'
-    expected['ams-hbase-site']['properties']['hbase.hregion.memstore.flush.size'] = '268435456'
-    expected['ams-hbase-site']['properties']['hbase.regionserver.handler.count'] = '60'
-    expected['ams-hbase-site']['properties']['hbase.regionserver.hlog.blocksize'] = '134217728'
-    expected['ams-hbase-site']['properties']['hbase.regionserver.maxlogs'] = '64'
-    expected['ams-hbase-site']['properties']['phoenix.coprocessor.maxMetaDataCacheSize'] = '40960000'
-
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations, expected)
 
   def test_recommendHbaseConfigurations(self):
     servicesList = ["HBASE"]


[2/2] ambari git commit: AMBARI-22124. Refactor AMS logic in stack advisors to service advisors.(vbrodetskyi)

Posted by vb...@apache.org.
AMBARI-22124. Refactor AMS logic in stack advisors to service advisors.(vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0f32765d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0f32765d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0f32765d

Branch: refs/heads/trunk
Commit: 0f32765dc2250044c7925f4e68e6f61b7a77d8f8
Parents: 9adfcdc
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Fri Oct 6 10:40:33 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Fri Oct 6 10:40:33 2017 +0300

----------------------------------------------------------------------
 .../AMBARI_METRICS/0.1.0/service_advisor.py     | 787 +++++++++++++++++++
 .../ATLAS/0.7.0.3.0/service_advisor.py          |   5 +-
 .../stacks/HDP/2.0.6/services/stack_advisor.py  | 542 +------------
 .../stacks/HDP/2.2/services/stack_advisor.py    |   1 -
 .../AMBARI_METRICS/test_service_advisor.py      | 596 ++++++++++++++
 .../stacks/2.0.6/common/test_stack_advisor.py   | 576 --------------
 .../stacks/2.2/common/test_stack_advisor.py     | 511 ------------
 7 files changed, 1388 insertions(+), 1630 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0f32765d/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/service_advisor.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/service_advisor.py
new file mode 100644
index 0000000..eae98bf
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/service_advisor.py
@@ -0,0 +1,787 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+# Python imports
+import imp
+import re
+import os
+import sys
+import socket
+import traceback
+from math import ceil, floor, log
+
+
+from resource_management.core.logger import Logger
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../stacks/')
+PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
+
+#split points
+metricsDir = os.path.join(SCRIPT_DIR, 'package')
+print "METRICS_DIR=>" + str(metricsDir)
+serviceMetricsDir = os.path.join(metricsDir, 'files', 'service-metrics')
+customServiceMetricsDir = os.path.join(SCRIPT_DIR, '../../../dashboards/service-metrics')
+sys.path.append(os.path.join(metricsDir, 'scripts'))
+
+from split_points import FindSplitPointsForAMSRegions
+
+try:
+  with open(PARENT_FILE, 'rb') as fp:
+    service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
+except Exception as e:
+  traceback.print_exc()
+  print "Failed to load parent"
+
+class AMBARI_METRICSServiceAdvisor(service_advisor.ServiceAdvisor):
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(AMBARI_METRICSServiceAdvisor, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    # Always call these methods
+    self.modifyMastersWithMultipleInstances()
+    self.modifyCardinalitiesDict()
+    self.modifyHeapSizeProperties()
+    self.modifyNotValuableComponents()
+    self.modifyComponentsNotPreferableOnServer()
+    self.modifyComponentLayoutSchemes()
+
+  def modifyMastersWithMultipleInstances(self):
+    """
+    Modify the set of masters with multiple instances.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyCardinalitiesDict(self):
+    """
+    Modify the dictionary of cardinalities.
+    Must be overridden in child class.
+    """
+    min_val = 1
+
+    self.cardinalitiesDict.update(
+      {
+        'METRICS_COLLECTOR': {"min": min_val}
+      }
+    )
+
+  def modifyHeapSizeProperties(self):
+    """
+    Modify the dictionary of heap size properties.
+    Must be overridden in child class.
+    """
+    self.heap_size_properties = {"METRICS_COLLECTOR":
+                                   [{"config-name": "ams-hbase-env",
+                                     "property": "hbase_master_heapsize",
+                                     "default": "1024m"},
+                                    {"config-name": "ams-hbase-env",
+                                     "property": "hbase_regionserver_heapsize",
+                                     "default": "1024m"},
+                                    {"config-name": "ams-env",
+                                     "property": "metrics_collector_heapsize",
+                                     "default": "512m"}]}
+
+
+  def modifyNotValuableComponents(self):
+    """
+    Modify the set of components whose host assignment is based on other services.
+    Must be overridden in child class.
+    """
+    self.notValuableComponents |= set(['METRICS_MONITOR'])
+
+  def modifyComponentsNotPreferableOnServer(self):
+    """
+    Modify the set of components that are not preferable on the server.
+    Must be overridden in child class.
+    """
+    self.notPreferableOnServerComponents |= set(['METRICS_COLLECTOR'])
+
+
+  def modifyComponentLayoutSchemes(self):
+    """
+    Modify layout scheme dictionaries for components.
+    The scheme dictionary maps cluster host counts to the host index where
+    the component should be placed.
+    Must be overridden in child class.
+    """
+    self.componentLayoutSchemes.update({'METRICS_COLLECTOR': {3: 2, 6: 2, 31: 3, "else": 5}})
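+    # Roughly: a cluster below each size threshold places METRICS_COLLECTOR on the
+    # mapped host index (e.g. host index 2 for very small clusters), with "else"
+    # covering larger clusters.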
+
+
+  def getServiceComponentLayoutValidations(self, services, hosts):
+    """
+    Get a list of errors.
+    Must be overridden in child class.
+    """
+
+    return []
+
+  def getAmsMemoryRecommendation(self, services, hosts):
+    # MB per sink in hbase heapsize
+    HEAP_PER_MASTER_COMPONENT = 50
+    HEAP_PER_SLAVE_COMPONENT = 10
+
+    schMemoryMap = {
+      "HDFS": {
+        "NAMENODE": HEAP_PER_MASTER_COMPONENT,
+        "SECONDARY_NAMENODE": HEAP_PER_MASTER_COMPONENT,
+        "DATANODE": HEAP_PER_SLAVE_COMPONENT
+      },
+      "YARN": {
+        "RESOURCEMANAGER": HEAP_PER_MASTER_COMPONENT,
+        "NODEMANAGER": HEAP_PER_SLAVE_COMPONENT,
+        "HISTORYSERVER" : HEAP_PER_MASTER_COMPONENT,
+        "APP_TIMELINE_SERVER": HEAP_PER_MASTER_COMPONENT
+      },
+      "HBASE": {
+        "HBASE_MASTER": HEAP_PER_MASTER_COMPONENT,
+        "HBASE_REGIONSERVER": HEAP_PER_SLAVE_COMPONENT
+      },
+      "HIVE": {
+        "HIVE_METASTORE": HEAP_PER_MASTER_COMPONENT,
+        "HIVE_SERVER": HEAP_PER_MASTER_COMPONENT
+      },
+      "KAFKA": {
+        "KAFKA_BROKER": HEAP_PER_MASTER_COMPONENT
+      },
+      "FLUME": {
+        "FLUME_HANDLER": HEAP_PER_SLAVE_COMPONENT
+      },
+      "STORM": {
+        "NIMBUS": HEAP_PER_MASTER_COMPONENT,
+      },
+      "AMBARI_METRICS": {
+        "METRICS_COLLECTOR": HEAP_PER_MASTER_COMPONENT,
+        "METRICS_MONITOR": HEAP_PER_SLAVE_COMPONENT
+      },
+      "ACCUMULO": {
+        "ACCUMULO_MASTER": HEAP_PER_MASTER_COMPONENT,
+        "ACCUMULO_TSERVER": HEAP_PER_SLAVE_COMPONENT
+      },
+      "LOGSEARCH": {
+        "LOGSEARCH_LOGFEEDER" : HEAP_PER_SLAVE_COMPONENT
+      }
+    }
+    total_sinks_count = 0
+    # minimum heap size
+    hbase_heapsize = 500
+    for serviceName, componentsDict in schMemoryMap.items():
+      for componentName, multiplier in componentsDict.items():
+        schCount = len(
+          self.getHostsWithComponent(serviceName, componentName, services,
+                                     hosts))
+        hbase_heapsize += int((schCount * multiplier))
+        total_sinks_count += schCount
+    collector_heapsize = int(hbase_heapsize/3 if hbase_heapsize > 2048 else 512)
+    hbase_heapsize = min(hbase_heapsize, 32768)
+
+    return self.round_to_n(collector_heapsize), self.round_to_n(hbase_heapsize), total_sinks_count
+
+
+  def round_to_n(self, mem_size, n=128):
+    return int(round(float(mem_size) / float(n))) * int(n)
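+  # Illustrative sizing for a hypothetical cluster: NAMENODE, SECONDARY_NAMENODE,
+  # RESOURCEMANAGER and METRICS_COLLECTOR (4 * 50 MB) plus 100 each of DATANODE,
+  # NODEMANAGER and METRICS_MONITOR (300 * 10 MB) give hbase_heapsize =
+  # 500 + 200 + 3000 = 3700 and total_sinks_count = 304; since 3700 > 2048,
+  # collector_heapsize = 3700 / 3 = 1233, and rounding both values to the
+  # nearest multiple of 128 returns (1280, 3712, 304).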
+
+
+  def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
+    """
+    Entry point.
+    Must be overridden in child class.
+    """
+    #Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    recommender = AMBARI_METRICSRecommender()
+    recommender.recommendAmsConfigurationsFromHDP206(configurations, clusterData, services, hosts)
+
+
+
+
+  def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
+    """
+    Entry point.
+    Validate configurations for the service. Return a list of errors.
+    The code for this function should be the same for each Service Advisor.
+    """
+    #Logger.info("Class: %s, Method: %s. Validating Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    validator = self.getAMBARI_METRICSValidator()
+    # Calls the methods of the validator using arguments,
+    # method(siteProperties, siteRecommendations, configurations, services, hosts)
+    return validator.validateListOfConfigUsingMethod(configurations, recommendedDefaults, services, hosts, validator.validators)
+
+
+  def getAMBARI_METRICSValidator(self):
+    return AMBARI_METRICSValidator()
+
+class AMBARI_METRICSRecommender(service_advisor.ServiceAdvisor):
+  """
+  AMS Recommender suggests properties when adding the service for the first time or modifying configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(AMBARI_METRICSRecommender, self)
+    self.as_super.__init__(*args, **kwargs)
+
+
+
+  def getPreferredMountPoints(self, hostInfo):
+
+    # '/etc/resolv.conf', '/etc/hostname', '/etc/hosts' are docker specific mount points
+    undesirableMountPoints = ["/", "/home", "/etc/resolv.conf", "/etc/hosts",
+                              "/etc/hostname", "/tmp"]
+    undesirableFsTypes = ["devtmpfs", "tmpfs", "vboxsf", "CDFS"]
+    mountPoints = []
+    if hostInfo and "disk_info" in hostInfo:
+      mountPointsDict = {}
+      for mountpoint in hostInfo["disk_info"]:
+        if not (mountpoint["mountpoint"] in undesirableMountPoints or
+                mountpoint["mountpoint"].startswith(("/boot", "/mnt")) or
+                mountpoint["type"] in undesirableFsTypes or
+                mountpoint["available"] == str(0)):
+          mountPointsDict[mountpoint["mountpoint"]] = self.to_number(mountpoint["available"])
+      if mountPointsDict:
+        mountPoints = sorted(mountPointsDict, key=mountPointsDict.get, reverse=True)
+    mountPoints.append("/")
+    return mountPoints
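+  # e.g. a hypothetical host with disk_info [{"mountpoint": "/", ...},
+  # {"mountpoint": "/grid/0", "available": "500000000", "type": "ext4", ...}]
+  # returns ["/grid/0", "/"]: the largest acceptable mount point first,
+  # with "/" appended as the fallback.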
+
+  def recommendAmsConfigurationsFromHDP206(self, configurations, clusterData, services, hosts):
+    putAmsEnvProperty = self.putProperty(configurations, "ams-env", services)
+    putAmsHbaseSiteProperty = self.putProperty(configurations, "ams-hbase-site", services)
+    putAmsSiteProperty = self.putProperty(configurations, "ams-site", services)
+    putHbaseEnvProperty = self.putProperty(configurations, "ams-hbase-env", services)
+    putGrafanaProperty = self.putProperty(configurations, "ams-grafana-env", services)
+    putGrafanaPropertyAttribute = self.putPropertyAttribute(configurations, "ams-grafana-env")
+
+    amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
+
+    serviceAdvisor = AMBARI_METRICSServiceAdvisor()
+
+    # TODO set "timeline.metrics.service.webapp.address" to 0.0.0.0:port in upgrade catalog
+    timeline_metrics_service_webapp_address = '0.0.0.0'
+
+    putAmsSiteProperty("timeline.metrics.service.webapp.address", str(timeline_metrics_service_webapp_address) + ":6188")
+
+    log_dir = "/var/log/ambari-metrics-collector"
+    if "ams-env" in services["configurations"]:
+      if "metrics_collector_log_dir" in services["configurations"]["ams-env"]["properties"]:
+        log_dir = services["configurations"]["ams-env"]["properties"]["metrics_collector_log_dir"]
+      putHbaseEnvProperty("hbase_log_dir", log_dir)
+
+    defaultFs = 'file:///'
+    if "core-site" in services["configurations"] and \
+      "fs.defaultFS" in services["configurations"]["core-site"]["properties"]:
+      defaultFs = services["configurations"]["core-site"]["properties"]["fs.defaultFS"]
+
+    operatingMode = "embedded"
+    if "ams-site" in services["configurations"]:
+      if "timeline.metrics.service.operation.mode" in services["configurations"]["ams-site"]["properties"]:
+        operatingMode = services["configurations"]["ams-site"]["properties"]["timeline.metrics.service.operation.mode"]
+
+    if len(amsCollectorHosts) > 1 :
+      operatingMode = "distributed"
+      putAmsSiteProperty("timeline.metrics.service.operation.mode", operatingMode)
+
+    if operatingMode == "distributed":
+      putAmsSiteProperty("timeline.metrics.service.watcher.disabled", 'true')
+      putAmsHbaseSiteProperty("hbase.cluster.distributed", 'true')
+    else:
+      putAmsSiteProperty("timeline.metrics.service.watcher.disabled", 'false')
+      putAmsHbaseSiteProperty("hbase.cluster.distributed", 'false')
+
+    rootDir = "file:///var/lib/ambari-metrics-collector/hbase"
+    tmpDir = "/var/lib/ambari-metrics-collector/hbase-tmp"
+    zk_port_default = []
+    if "ams-hbase-site" in services["configurations"]:
+      if "hbase.rootdir" in services["configurations"]["ams-hbase-site"]["properties"]:
+        rootDir = services["configurations"]["ams-hbase-site"]["properties"]["hbase.rootdir"]
+      if "hbase.tmp.dir" in services["configurations"]["ams-hbase-site"]["properties"]:
+        tmpDir = services["configurations"]["ams-hbase-site"]["properties"]["hbase.tmp.dir"]
+      if "hbase.zookeeper.property.clientPort" in services["configurations"]["ams-hbase-site"]["properties"]:
+        zk_port_default = services["configurations"]["ams-hbase-site"]["properties"]["hbase.zookeeper.property.clientPort"]
+
+    # Skip the recommendation if the templated default value is present
+    if operatingMode == "distributed" and "{{zookeeper_clientPort}}" not in zk_port_default:
+      zkPort = self.getZKPort(services)
+      putAmsHbaseSiteProperty("hbase.zookeeper.property.clientPort", zkPort)
+    elif operatingMode == "embedded" and "{{zookeeper_clientPort}}" not in zk_port_default:
+      putAmsHbaseSiteProperty("hbase.zookeeper.property.clientPort", "61181")
+
+    mountpoints = ["/"]
+    for collectorHostName in amsCollectorHosts:
+      for host in hosts["items"]:
+        if host["Hosts"]["host_name"] == collectorHostName:
+          mountpoints = self.getPreferredMountPoints(host["Hosts"])
+          break
+    isLocalRootDir = rootDir.startswith("file://") or (defaultFs.startswith("file://") and rootDir.startswith("/"))
+    if isLocalRootDir:
+      rootDir = re.sub("^file:///|/", "", rootDir, count=1)
+      rootDir = "file://" + os.path.join(mountpoints[0], rootDir)
+    tmpDir = re.sub("^file:///|/", "", tmpDir, count=1)
+    if len(mountpoints) > 1 and isLocalRootDir:
+      tmpDir = os.path.join(mountpoints[1], tmpDir)
+    else:
+      tmpDir = os.path.join(mountpoints[0], tmpDir)
+    putAmsHbaseSiteProperty("hbase.tmp.dir", tmpDir)
+
+    if operatingMode == "distributed":
+      putAmsHbaseSiteProperty("hbase.rootdir", "/user/ams/hbase")
+
+    if operatingMode == "embedded":
+      if isLocalRootDir:
+        putAmsHbaseSiteProperty("hbase.rootdir", rootDir)
+      else:
+        putAmsHbaseSiteProperty("hbase.rootdir", "file:///var/lib/ambari-metrics-collector/hbase")
+
+    collector_heapsize, hbase_heapsize, total_sinks_count = serviceAdvisor.getAmsMemoryRecommendation(services, hosts)
+
+    putAmsEnvProperty("metrics_collector_heapsize", collector_heapsize)
+
+    putAmsSiteProperty("timeline.metrics.cache.size", max(100, int(log(total_sinks_count)) * 100))
+    putAmsSiteProperty("timeline.metrics.cache.commit.interval", min(10, max(12 - int(log(total_sinks_count)), 2)))
+
+    # blockCache = 0.3, memstore = 0.35, phoenix-server = 0.15, phoenix-client = 0.25
+    putAmsHbaseSiteProperty("hfile.block.cache.size", 0.3)
+    putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 134217728)
+    putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", 0.35)
+    putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.lowerLimit", 0.3)
+
+    if len(amsCollectorHosts) <= 1:
+      # blockCache = 0.3, memstore = 0.3, phoenix-server = 0.2, phoenix-client = 0.3
+      if total_sinks_count >= 2000:
+        putAmsHbaseSiteProperty("hbase.regionserver.handler.count", 60)
+        putAmsHbaseSiteProperty("hbase.regionserver.hlog.blocksize", 134217728)
+        putAmsHbaseSiteProperty("hbase.regionserver.maxlogs", 64)
+        putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 268435456)
+        putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", 0.3)
+        putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.lowerLimit", 0.25)
+        putAmsHbaseSiteProperty("phoenix.query.maxGlobalMemoryPercentage", 20)
+        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 81920000)
+        putAmsSiteProperty("phoenix.query.maxGlobalMemoryPercentage", 30)
+        putAmsSiteProperty("timeline.metrics.service.resultset.fetchSize", 10000)
+      elif total_sinks_count >= 1000:
+        putAmsHbaseSiteProperty("hbase.regionserver.handler.count", 60)
+        putAmsHbaseSiteProperty("hbase.regionserver.hlog.blocksize", 134217728)
+        putAmsHbaseSiteProperty("hbase.regionserver.maxlogs", 64)
+        putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 268435456)
+        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 40960000)
+        putAmsSiteProperty("timeline.metrics.service.resultset.fetchSize", 5000)
+      else:
+        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 20480000)
+      pass
+
+    metrics_api_handlers = min(50, max(20, int(total_sinks_count / 100)))
+    putAmsSiteProperty("timeline.metrics.service.handler.thread.count", metrics_api_handlers)
+
+
+    # Distributed mode heap size
+    if operatingMode == "distributed":
+      hbase_heapsize = max(hbase_heapsize, 1024)
+      putHbaseEnvProperty("hbase_master_heapsize", "512")
+      putHbaseEnvProperty("hbase_master_xmn_size", "102") #20% of 512 heap size
+      putHbaseEnvProperty("hbase_regionserver_heapsize", hbase_heapsize)
+      putHbaseEnvProperty("regionserver_xmn_size", serviceAdvisor.round_to_n(0.15 * hbase_heapsize,64))
+    else:
+      # Embedded mode heap size : master + regionserver
+      hbase_rs_heapsize = 512
+      putHbaseEnvProperty("hbase_regionserver_heapsize", hbase_rs_heapsize)
+      putHbaseEnvProperty("hbase_master_heapsize", hbase_heapsize)
+      putHbaseEnvProperty("hbase_master_xmn_size", serviceAdvisor.round_to_n(0.15*(hbase_heapsize + hbase_rs_heapsize),64))
+
+    # If no local DN in distributed mode
+    if operatingMode == "distributed":
+      dn_hosts = self.getComponentHostNames(services, "HDFS", "DATANODE")
+      # call by Kerberos wizard sends only the service being affected
+      # so it is possible for dn_hosts to be None but not amsCollectorHosts
+      if dn_hosts and len(dn_hosts) > 0:
+        if set(amsCollectorHosts).intersection(dn_hosts):
+          collector_cohosted_with_dn = "true"
+        else:
+          collector_cohosted_with_dn = "false"
+        putAmsHbaseSiteProperty("dfs.client.read.shortcircuit", collector_cohosted_with_dn)
+
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+    ams_hbase_site = None
+    ams_hbase_env = None
+
+    # Overridden properties from the UI
+    if "ams-hbase-site" in services["configurations"]:
+      ams_hbase_site = services["configurations"]["ams-hbase-site"]["properties"]
+    if "ams-hbase-env" in services["configurations"]:
+      ams_hbase_env = services["configurations"]["ams-hbase-env"]["properties"]
+
+    # Recommendations
+    if not ams_hbase_site:
+      ams_hbase_site = configurations["ams-hbase-site"]["properties"]
+    if not ams_hbase_env:
+      ams_hbase_env = configurations["ams-hbase-env"]["properties"]
+
+    split_point_finder = FindSplitPointsForAMSRegions(
+      ams_hbase_site, ams_hbase_env, serviceMetricsDir, customServiceMetricsDir, operatingMode, servicesList)
+
+    result = split_point_finder.get_split_points()
+    precision_splits = ' '
+    aggregate_splits = ' '
+    if result.precision:
+      precision_splits = result.precision
+    if result.aggregate:
+      aggregate_splits = result.aggregate
+    putAmsSiteProperty("timeline.metrics.host.aggregate.splitpoints", ','.join(precision_splits))
+    putAmsSiteProperty("timeline.metrics.cluster.aggregate.splitpoints", ','.join(aggregate_splits))
+
+    component_grafana_exists = False
+    for service in services['services']:
+      if 'components' in service:
+        for component in service['components']:
+          if 'StackServiceComponents' in component:
+            # If Grafana is installed the hostnames would indicate its location
+            if 'METRICS_GRAFANA' in component['StackServiceComponents']['component_name'] and\
+              len(component['StackServiceComponents']['hostnames']) != 0:
+              component_grafana_exists = True
+              break
+    pass
+
+    if not component_grafana_exists:
+      putGrafanaPropertyAttribute("metrics_grafana_password", "visible", "false")
+
+    pass
+
+
+
+class AMBARI_METRICSValidator(service_advisor.ServiceAdvisor):
+  """
+  AMS Validator checks the correctness of properties whenever the service is first added or the user attempts to
+  change configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(AMBARI_METRICSValidator, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    self.validators = [("ams-hbase-site", self.validateAmsHbaseSiteConfigurationsFromHDP206),
+                       ("ams-hbase-env", self.validateAmsHbaseEnvConfigurationsFromHDP206),
+                       ("ams-site", self.validateAmsSiteConfigurationsFromHDP206),
+                       ("ams-env", self.validateAmsEnvConfigurationsFromHDP206),
+                       ("ams-grafana-env", self.validateGrafanaEnvConfigurationsFromHDP206)]
+
+
+
+  def getPreferredMountPoints(self, hostInfo):
+
+    # '/etc/resolv.conf', '/etc/hostname', '/etc/hosts' are docker specific mount points
+    undesirableMountPoints = ["/", "/home", "/etc/resolv.conf", "/etc/hosts",
+                              "/etc/hostname", "/tmp"]
+    undesirableFsTypes = ["devtmpfs", "tmpfs", "vboxsf", "CDFS"]
+    mountPoints = []
+    if hostInfo and "disk_info" in hostInfo:
+      mountPointsDict = {}
+      for mountpoint in hostInfo["disk_info"]:
+        if not (mountpoint["mountpoint"] in undesirableMountPoints or
+                mountpoint["mountpoint"].startswith(("/boot", "/mnt")) or
+                mountpoint["type"] in undesirableFsTypes or
+                mountpoint["available"] == str(0)):
+          mountPointsDict[mountpoint["mountpoint"]] = self.to_number(mountpoint["available"])
+      if mountPointsDict:
+        mountPoints = sorted(mountPointsDict, key=mountPointsDict.get, reverse=True)
+    mountPoints.append("/")
+    return mountPoints
+
+  def validateAmsHbaseSiteConfigurationsFromHDP206(self, properties, recommendedDefaults, configurations, services, hosts):
+
+    amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
+    ams_site = self.getSiteProperties(configurations, "ams-site")
+    core_site = self.getSiteProperties(configurations, "core-site")
+
+    serviceAdvisor = AMBARI_METRICSServiceAdvisor()
+
+    collector_heapsize, hbase_heapsize, total_sinks_count = serviceAdvisor.getAmsMemoryRecommendation(services, hosts)
+    recommendedDiskSpace = 10485760
+    # TODO validate configuration for multiple AMBARI_METRICS collectors
+    if len(amsCollectorHosts) > 1:
+      pass
+    else:
+      if total_sinks_count > 2000:
+        recommendedDiskSpace  = 104857600  # * 1k == 100 Gb
+      elif total_sinks_count > 500:
+        recommendedDiskSpace  = 52428800  # * 1k == 50 Gb
+      elif total_sinks_count > 250:
+        recommendedDiskSpace  = 20971520  # * 1k == 20 Gb
+
+    validationItems = []
+
+    rootdir_item = None
+    op_mode = ams_site.get("timeline.metrics.service.operation.mode")
+    default_fs = core_site.get("fs.defaultFS") if core_site else "file:///"
+    hbase_rootdir = properties.get("hbase.rootdir")
+    hbase_tmpdir = properties.get("hbase.tmp.dir")
+    distributed = properties.get("hbase.cluster.distributed")
+    is_local_root_dir = hbase_rootdir.startswith("file://") or (default_fs.startswith("file://") and hbase_rootdir.startswith("/"))
+
+    if op_mode == "distributed" and is_local_root_dir:
+      rootdir_item = self.getWarnItem("In distributed mode hbase.rootdir should point to HDFS.")
+    elif op_mode == "embedded":
+      if (distributed.lower() == "false" and hbase_rootdir.startswith('/')) or hbase_rootdir.startswith("hdfs://"):
+        rootdir_item = self.getWarnItem("In embedded mode hbase.rootdir cannot point to a schemeless path or to HDFS. "
+                                        "Example - file:// for local FS")
+      pass
+
+    distributed_item = None
+    if op_mode == "distributed" and not distributed.lower() == "true":
+      distributed_item = self.getErrorItem("hbase.cluster.distributed property should be set to true for "
+                                           "distributed mode")
+    if op_mode == "embedded" and distributed.lower() == "true":
+      distributed_item = self.getErrorItem("hbase.cluster.distributed property should be set to false for embedded mode")
+
+    hbase_zk_client_port = properties.get("hbase.zookeeper.property.clientPort")
+    zkPort = self.getZKPort(services)
+    hbase_zk_client_port_item = None
+    if distributed.lower() == "true" and op_mode == "distributed" and \
+        hbase_zk_client_port != zkPort and hbase_zk_client_port != "{{zookeeper_clientPort}}":
+      hbase_zk_client_port_item = self.getErrorItem("In AMS distributed mode, hbase.zookeeper.property.clientPort "
+                                                    "should be the cluster zookeeper server port : {0}".format(zkPort))
+
+    if distributed.lower() == "false" and op_mode == "embedded" and \
+        hbase_zk_client_port == zkPort and hbase_zk_client_port != "{{zookeeper_clientPort}}":
+      hbase_zk_client_port_item = self.getErrorItem("In AMS embedded mode, hbase.zookeeper.property.clientPort "
+                                                    "should be a different port than cluster zookeeper port."
+                                                    "(default:61181)")
+
+    validationItems.extend([{"config-name":'hbase.rootdir', "item": rootdir_item },
+                            {"config-name":'hbase.cluster.distributed', "item": distributed_item },
+                            {"config-name":'hbase.zookeeper.property.clientPort', "item": hbase_zk_client_port_item }])
+
+    for collectorHostName in amsCollectorHosts:
+      for host in hosts["items"]:
+        if host["Hosts"]["host_name"] == collectorHostName:
+          if op_mode == 'embedded' or is_local_root_dir:
+            validationItems.extend([{"config-name": 'hbase.rootdir', "item": self.validatorEnoughDiskSpace(properties, 'hbase.rootdir', host["Hosts"], recommendedDiskSpace)}])
+            validationItems.extend([{"config-name": 'hbase.rootdir', "item": self.validatorNotRootFs(properties, recommendedDefaults, 'hbase.rootdir', host["Hosts"])}])
+            validationItems.extend([{"config-name": 'hbase.tmp.dir', "item": self.validatorNotRootFs(properties, recommendedDefaults, 'hbase.tmp.dir', host["Hosts"])}])
+
+          dn_hosts = self.getComponentHostNames(services, "HDFS", "DATANODE")
+          if is_local_root_dir:
+            mountPoints = []
+            for mountPoint in host["Hosts"]["disk_info"]:
+              mountPoints.append(mountPoint["mountpoint"])
+            hbase_rootdir_mountpoint = self.getMountPointForDir(hbase_rootdir, mountPoints)
+            hbase_tmpdir_mountpoint = self.getMountPointForDir(hbase_tmpdir, mountPoints)
+            preferred_mountpoints = self.getPreferredMountPoints(host['Hosts'])
+            # hbase.rootdir and hbase.tmp.dir shouldn't point to the same partition
+            # if multiple preferred_mountpoints exist
+            if hbase_rootdir_mountpoint == hbase_tmpdir_mountpoint and \
+              len(preferred_mountpoints) > 1:
+              item = self.getWarnItem("Consider not using {0} partition for storing metrics temporary data. "
+                                      "{0} partition is already used as hbase.rootdir to store metrics data".format(hbase_tmpdir_mountpoint))
+              validationItems.extend([{"config-name":'hbase.tmp.dir', "item": item}])
+
+            # if METRICS_COLLECTOR is co-hosted with DATANODE
+            # cross-check dfs.datanode.data.dir and hbase.rootdir
+            # they shouldn't share same disk partition IO
+            hdfs_site = self.getSiteProperties(configurations, "hdfs-site")
+            dfs_datadirs = hdfs_site.get("dfs.datanode.data.dir").split(",") if hdfs_site and "dfs.datanode.data.dir" in hdfs_site else []
+            if dn_hosts and collectorHostName in dn_hosts and ams_site and \
+              dfs_datadirs and len(preferred_mountpoints) > len(dfs_datadirs):
+              for dfs_datadir in dfs_datadirs:
+                dfs_datadir_mountpoint = self.getMountPointForDir(dfs_datadir, mountPoints)
+                if dfs_datadir_mountpoint == hbase_rootdir_mountpoint:
+                  item = self.getWarnItem("Consider not using {0} partition for storing metrics data. "
+                                          "{0} is already used by datanode to store HDFS data".format(hbase_rootdir_mountpoint))
+                  validationItems.extend([{"config-name": 'hbase.rootdir', "item": item}])
+                  break
+          # If no local DN in distributed mode
+          elif collectorHostName not in dn_hosts and distributed.lower() == "true":
+            item = self.getWarnItem("It's recommended to install Datanode component on {0} "
+                                    "to speed up IO operations between HDFS and Metrics "
+                                    "Collector in distributed mode ".format(collectorHostName))
+            validationItems.extend([{"config-name": "hbase.cluster.distributed", "item": item}])
+          # Short circuit read should be enabled in distributed mode
+          # if a local DN is installed
+          else:
+            validationItems.extend([{"config-name": "dfs.client.read.shortcircuit", "item": self.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "dfs.client.read.shortcircuit")}])
+
+    return self.toConfigurationValidationProblems(validationItems, "ams-hbase-site")
+
+
+  def validateAmsHbaseEnvConfigurationsFromHDP206(self, properties, recommendedDefaults, configurations, services, hosts):
+
+    ams_env = self.getSiteProperties(configurations, "ams-env")
+    amsHbaseSite = self.getSiteProperties(configurations, "ams-hbase-site")
+    validationItems = []
+    mb = 1024 * 1024
+    gb = 1024 * mb
+
+    regionServerItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "hbase_regionserver_heapsize") ## FIXME if new service added
+    if regionServerItem:
+      validationItems.extend([{"config-name": "hbase_regionserver_heapsize", "item": regionServerItem}])
+
+    hbaseMasterHeapsizeItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "hbase_master_heapsize")
+    if hbaseMasterHeapsizeItem:
+      validationItems.extend([{"config-name": "hbase_master_heapsize", "item": hbaseMasterHeapsizeItem}])
+
+    logDirItem = self.validatorEqualsPropertyItem(properties, "hbase_log_dir", ams_env, "metrics_collector_log_dir")
+    if logDirItem:
+      validationItems.extend([{"config-name": "hbase_log_dir", "item": logDirItem}])
+
+    hbase_master_heapsize = self.to_number(properties["hbase_master_heapsize"])
+    hbase_master_xmn_size = self.to_number(properties["hbase_master_xmn_size"])
+    hbase_regionserver_heapsize = self.to_number(properties["hbase_regionserver_heapsize"])
+    hbase_regionserver_xmn_size = self.to_number(properties["regionserver_xmn_size"])
+
+    # Validate Xmn settings.
+    masterXmnItem = None
+    regionServerXmnItem = None
+    is_hbase_distributed = amsHbaseSite.get("hbase.cluster.distributed").lower() == 'true'
+
+    if is_hbase_distributed:
+
+      if not regionServerItem and hbase_regionserver_heapsize > 32768:
+        regionServerItem = self.getWarnItem("Value is more than the recommended maximum heap size of 32G.")
+        validationItems.extend([{"config-name": "hbase_regionserver_heapsize", "item": regionServerItem}])
+
+      minMasterXmn = 0.12 * hbase_master_heapsize
+      maxMasterXmn = 0.2 * hbase_master_heapsize
+      if hbase_master_xmn_size < minMasterXmn:
+        masterXmnItem = self.getWarnItem("Value is lesser than the recommended minimum Xmn size of {0} "
+                                         "(12% of hbase_master_heapsize)".format(int(ceil(minMasterXmn))))
+
+      if hbase_master_xmn_size > maxMasterXmn:
+        masterXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
+                                         "(20% of hbase_master_heapsize)".format(int(floor(maxMasterXmn))))
+
+      minRegionServerXmn = 0.12 * hbase_regionserver_heapsize
+      maxRegionServerXmn = 0.2 * hbase_regionserver_heapsize
+      if hbase_regionserver_xmn_size < minRegionServerXmn:
+        regionServerXmnItem = self.getWarnItem("Value is lesser than the recommended minimum Xmn size of {0} "
+                                               "(12% of hbase_regionserver_heapsize)"
+                                               .format(int(ceil(minRegionServerXmn))))
+
+      if hbase_regionserver_xmn_size > maxRegionServerXmn:
+        regionServerXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
+                                               "(20% of hbase_regionserver_heapsize)"
+                                               .format(int(floor(maxRegionServerXmn))))
+    else:
+
+      if not hbaseMasterHeapsizeItem and (hbase_master_heapsize + hbase_regionserver_heapsize) > 32768:
+        hbaseMasterHeapsizeItem = self.getWarnItem("Value of Master + Regionserver heapsize is more than the recommended maximum heap size of 32G.")
+        validationItems.extend([{"config-name": "hbase_master_heapsize", "item": hbaseMasterHeapsizeItem}])
+
+      minMasterXmn = 0.12 * (hbase_master_heapsize + hbase_regionserver_heapsize)
+      maxMasterXmn = 0.2 *  (hbase_master_heapsize + hbase_regionserver_heapsize)
+      if hbase_master_xmn_size < minMasterXmn:
+        masterXmnItem = self.getWarnItem("Value is lesser than the recommended minimum Xmn size of {0} "
+                                         "(12% of hbase_master_heapsize + hbase_regionserver_heapsize)"
+                                         .format(int(ceil(minMasterXmn))))
+
+      if hbase_master_xmn_size > maxMasterXmn:
+        masterXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
+                                         "(20% of hbase_master_heapsize + hbase_regionserver_heapsize)"
+                                         .format(int(floor(maxMasterXmn))))
+    if masterXmnItem:
+      validationItems.extend([{"config-name": "hbase_master_xmn_size", "item": masterXmnItem}])
+
+    if regionServerXmnItem:
+      validationItems.extend([{"config-name": "regionserver_xmn_size", "item": regionServerXmnItem}])
+
+    if hbaseMasterHeapsizeItem is None:
+      hostMasterComponents = {}
+
+      for service in services["services"]:
+        for component in service["components"]:
+          if component["StackServiceComponents"]["hostnames"] is not None:
+            for hostName in component["StackServiceComponents"]["hostnames"]:
+              if self.isMasterComponent(component):
+                if hostName not in hostMasterComponents.keys():
+                  hostMasterComponents[hostName] = []
+                hostMasterComponents[hostName].append(component["StackServiceComponents"]["component_name"])
+
+      amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
+      for collectorHostName in amsCollectorHosts:
+        for host in hosts["items"]:
+          if host["Hosts"]["host_name"] == collectorHostName:
+            # AMS Collector co-hosted with other master components in bigger clusters
+            if len(hosts['items']) > 31 and \
+                            len(hostMasterComponents[collectorHostName]) > 2 and \
+                            host["Hosts"]["total_mem"] < 32*mb: # < 32Gb(total_mem in k)
+              masterHostMessage = "Host {0} is used by multiple master components ({1}). " \
+                                  "It is recommended to use a separate host for the " \
+                                  "Ambari Metrics Collector component and ensure " \
+                                  "the host has sufficient memory available."
+
+              hbaseMasterHeapsizeItem = self.getWarnItem(masterHostMessage.format(
+                  collectorHostName, str(", ".join(hostMasterComponents[collectorHostName]))))
+              if hbaseMasterHeapsizeItem:
+                validationItems.extend([{"config-name": "hbase_master_heapsize", "item": hbaseMasterHeapsizeItem}])
+      pass
+
+    return self.toConfigurationValidationProblems(validationItems, "ams-hbase-env")
+
+
+  def validateAmsSiteConfigurationsFromHDP206(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+
+    serviceAdvisor = AMBARI_METRICSServiceAdvisor()
+
+    op_mode = properties.get("timeline.metrics.service.operation.mode")
+    correct_op_mode_item = None
+    if op_mode not in ("embedded", "distributed"):
+      correct_op_mode_item = self.getErrorItem("Correct value should be set.")
+      pass
+    elif len(self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")) > 1 and op_mode != 'distributed':
+      correct_op_mode_item = self.getErrorItem("Correct value should be 'distributed' for clusters with more then 1 Metrics collector")
+    elif op_mode == 'embedded':
+      collector_heapsize, hbase_heapsize, total_sinks_count = serviceAdvisor.getAmsMemoryRecommendation(services, hosts)
+      if total_sinks_count > 1000:
+        correct_op_mode_item = self.getWarnItem("Number of sinks writing metrics to the collector is expected to be more than 1000. "
+                                                "'Embedded' mode AMS might not be able to handle the load. Consider moving to distributed mode.")
+
+    validationItems.extend([{"config-name":'timeline.metrics.service.operation.mode', "item": correct_op_mode_item }])
+    return self.toConfigurationValidationProblems(validationItems, "ams-site")
+
+
+  def validateAmsEnvConfigurationsFromHDP206(self, properties, recommendedDefaults, configurations, services, hosts):
+
+    validationItems = []
+    collectorHeapsizeDefaultItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "metrics_collector_heapsize")
+    validationItems.extend([{"config-name": "metrics_collector_heapsize", "item": collectorHeapsizeDefaultItem}])
+
+    ams_env = self.getSiteProperties(configurations, "ams-env")
+    collector_heapsize = self.to_number(ams_env.get("metrics_collector_heapsize"))
+    if collector_heapsize > 32768:
+      collectorHeapsizeMaxItem = self.getWarnItem("Value is more than the recommended maximum heap size of 32G.")
+      validationItems.extend([{"config-name": "metrics_collector_heapsize", "item": collectorHeapsizeMaxItem}])
+
+    return self.toConfigurationValidationProblems(validationItems, "ams-env")
+
+
+  def validateGrafanaEnvConfigurationsFromHDP206(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+
+    grafana_pwd = properties.get("metrics_grafana_password")
+    grafana_pwd_length_item = None
+    if len(grafana_pwd) < 4:
+      grafana_pwd_length_item = self.getErrorItem("Grafana password length should be at least 4.")
+      pass
+    validationItems.extend([{"config-name":'metrics_grafana_password', "item": grafana_pwd_length_item }])
+    return self.toConfigurationValidationProblems(validationItems, "ams-site")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/0f32765d/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/service_advisor.py
index a2e31cc..058e086 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/service_advisor.py
@@ -74,7 +74,10 @@ class AtlasServiceAdvisor(service_advisor.ServiceAdvisor):
     Modify the dictionary of heap size properties.
     Must be overridden in child class.
     """
-    pass
+    self.heap_size_properties = {"ATLAS_SERVER":
+                                   [{"config-name": "atlas-env",
+                                     "property": "atlas_server_xmx",
+                                     "default": "2048m"}]}
 
   def modifyNotValuableComponents(self):
     """

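The pass stub is replaced with a concrete mapping: per component, a list of
entries naming the config type, the heap-size property, and a default. A
hypothetical consumer (the real base-class logic in service_advisor is not
part of this hunk) could resolve an effective heap size like this:

    # hypothetical helper; everything except the mapping entries is assumed
    HEAP_SIZE_PROPERTIES = {"ATLAS_SERVER":
                              [{"config-name": "atlas-env",
                                "property": "atlas_server_xmx",
                                "default": "2048m"}]}

    def effective_heap_size(configurations, component):
        # take the configured value if present, else the declared default
        for entry in HEAP_SIZE_PROPERTIES.get(component, []):
            site = configurations.get(entry["config-name"], {}).get("properties", {})
            return site.get(entry["property"], entry["default"])
        return None

    # effective_heap_size({}, "ATLAS_SERVER") -> "2048m"
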
http://git-wip-us.apache.org/repos/asf/ambari/blob/0f32765d/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index 5307176..a194332 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -172,7 +172,6 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       "HDFS": self.recommendHDFSConfigurations,
       "HBASE": self.recommendHbaseConfigurations,
       "STORM": self.recommendStormConfigurations,
-      "AMBARI_METRICS": self.recommendAmsConfigurations,
       "RANGER": self.recommendRangerConfigurations,
       "ZOOKEEPER": self.recommendZookeeperConfigurations,
       "OOZIE": self.recommendOozieConfigurations
@@ -509,66 +508,6 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
                 putRangerAuditProperty(item['target_configname'], rangerAuditProperty)
 
 
-  def getAmsMemoryRecommendation(self, services, hosts):
-    # MB per sink in hbase heapsize
-    HEAP_PER_MASTER_COMPONENT = 50
-    HEAP_PER_SLAVE_COMPONENT = 10
-
-    schMemoryMap = {
-      "HDFS": {
-        "NAMENODE": HEAP_PER_MASTER_COMPONENT,
-        "SECONDARY_NAMENODE": HEAP_PER_MASTER_COMPONENT,
-        "DATANODE": HEAP_PER_SLAVE_COMPONENT
-      },
-      "YARN": {
-        "RESOURCEMANAGER": HEAP_PER_MASTER_COMPONENT,
-        "NODEMANAGER": HEAP_PER_SLAVE_COMPONENT,
-        "HISTORYSERVER" : HEAP_PER_MASTER_COMPONENT,
-        "APP_TIMELINE_SERVER": HEAP_PER_MASTER_COMPONENT
-      },
-      "HBASE": {
-        "HBASE_MASTER": HEAP_PER_MASTER_COMPONENT,
-        "HBASE_REGIONSERVER": HEAP_PER_SLAVE_COMPONENT
-      },
-      "HIVE": {
-        "HIVE_METASTORE": HEAP_PER_MASTER_COMPONENT,
-        "HIVE_SERVER": HEAP_PER_MASTER_COMPONENT
-      },
-      "KAFKA": {
-        "KAFKA_BROKER": HEAP_PER_MASTER_COMPONENT
-      },
-      "FLUME": {
-        "FLUME_HANDLER": HEAP_PER_SLAVE_COMPONENT
-      },
-      "STORM": {
-        "NIMBUS": HEAP_PER_MASTER_COMPONENT,
-      },
-      "AMBARI_METRICS": {
-        "METRICS_COLLECTOR": HEAP_PER_MASTER_COMPONENT,
-        "METRICS_MONITOR": HEAP_PER_SLAVE_COMPONENT
-      },
-      "ACCUMULO": {
-        "ACCUMULO_MASTER": HEAP_PER_MASTER_COMPONENT,
-        "ACCUMULO_TSERVER": HEAP_PER_SLAVE_COMPONENT
-      },
-      "LOGSEARCH": {
-        "LOGSEARCH_LOGFEEDER" : HEAP_PER_SLAVE_COMPONENT
-      }
-    }
-    total_sinks_count = 0
-    # minimum heap size
-    hbase_heapsize = 500
-    for serviceName, componentsDict in schMemoryMap.items():
-      for componentName, multiplier in componentsDict.items():
-        schCount = len(
-          self.getHostsWithComponent(serviceName, componentName, services,
-                                     hosts))
-        hbase_heapsize += int((schCount * multiplier))
-        total_sinks_count += schCount
-    collector_heapsize = int(hbase_heapsize/3 if hbase_heapsize > 2048 else 512)
-    hbase_heapsize = min(hbase_heapsize, 32768)
-
-    return round_to_n(collector_heapsize), round_to_n(hbase_heapsize), total_sinks_count
 
   def recommendStormConfigurations(self, configurations, clusterData, services, hosts):
     putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
@@ -577,216 +516,6 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     if 'AMBARI_METRICS' in servicesList:
       putStormSiteProperty('metrics.reporter.register', 'org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter')
 
-  def recommendAmsConfigurations(self, configurations, clusterData, services, hosts):
-    putAmsEnvProperty = self.putProperty(configurations, "ams-env", services)
-    putAmsHbaseSiteProperty = self.putProperty(configurations, "ams-hbase-site", services)
-    putAmsSiteProperty = self.putProperty(configurations, "ams-site", services)
-    putHbaseEnvProperty = self.putProperty(configurations, "ams-hbase-env", services)
-    putGrafanaProperty = self.putProperty(configurations, "ams-grafana-env", services)
-    putGrafanaPropertyAttribute = self.putPropertyAttribute(configurations, "ams-grafana-env")
-
-    amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
-
-    # TODO set "timeline.metrics.service.webapp.address" to 0.0.0.0:port in upgrade catalog
-    timeline_metrics_service_webapp_address = '0.0.0.0'
-
-    putAmsSiteProperty("timeline.metrics.service.webapp.address", str(timeline_metrics_service_webapp_address) + ":6188")
-
-    log_dir = "/var/log/ambari-metrics-collector"
-    if "ams-env" in services["configurations"]:
-      if "metrics_collector_log_dir" in services["configurations"]["ams-env"]["properties"]:
-        log_dir = services["configurations"]["ams-env"]["properties"]["metrics_collector_log_dir"]
-      putHbaseEnvProperty("hbase_log_dir", log_dir)
-
-    defaultFs = 'file:///'
-    if "core-site" in services["configurations"] and \
-      "fs.defaultFS" in services["configurations"]["core-site"]["properties"]:
-      defaultFs = services["configurations"]["core-site"]["properties"]["fs.defaultFS"]
-
-    operatingMode = "embedded"
-    if "ams-site" in services["configurations"]:
-      if "timeline.metrics.service.operation.mode" in services["configurations"]["ams-site"]["properties"]:
-        operatingMode = services["configurations"]["ams-site"]["properties"]["timeline.metrics.service.operation.mode"]
-
-    if len(amsCollectorHosts) > 1 :
-      operatingMode = "distributed"
-      putAmsSiteProperty("timeline.metrics.service.operation.mode", operatingMode)
-
-    if operatingMode == "distributed":
-      putAmsSiteProperty("timeline.metrics.service.watcher.disabled", 'true')
-      putAmsHbaseSiteProperty("hbase.cluster.distributed", 'true')
-    else:
-      putAmsSiteProperty("timeline.metrics.service.watcher.disabled", 'false')
-      putAmsHbaseSiteProperty("hbase.cluster.distributed", 'false')
-
-    rootDir = "file:///var/lib/ambari-metrics-collector/hbase"
-    tmpDir = "/var/lib/ambari-metrics-collector/hbase-tmp"
-    zk_port_default = []
-    if "ams-hbase-site" in services["configurations"]:
-      if "hbase.rootdir" in services["configurations"]["ams-hbase-site"]["properties"]:
-        rootDir = services["configurations"]["ams-hbase-site"]["properties"]["hbase.rootdir"]
-      if "hbase.tmp.dir" in services["configurations"]["ams-hbase-site"]["properties"]:
-        tmpDir = services["configurations"]["ams-hbase-site"]["properties"]["hbase.tmp.dir"]
-      if "hbase.zookeeper.property.clientPort" in services["configurations"]["ams-hbase-site"]["properties"]:
-        zk_port_default = services["configurations"]["ams-hbase-site"]["properties"]["hbase.zookeeper.property.clientPort"]
-
-      # Skip recommendation item if default value is present
-    if operatingMode == "distributed" and not "{{zookeeper_clientPort}}" in zk_port_default:
-      zkPort = self.getZKPort(services)
-      putAmsHbaseSiteProperty("hbase.zookeeper.property.clientPort", zkPort)
-    elif operatingMode == "embedded" and not "{{zookeeper_clientPort}}" in zk_port_default:
-      putAmsHbaseSiteProperty("hbase.zookeeper.property.clientPort", "61181")
-
-    mountpoints = ["/"]
-    for collectorHostName in amsCollectorHosts:
-      for host in hosts["items"]:
-        if host["Hosts"]["host_name"] == collectorHostName:
-          mountpoints = self.getPreferredMountPoints(host["Hosts"])
-          break
-    isLocalRootDir = rootDir.startswith("file://") or (defaultFs.startswith("file://") and rootDir.startswith("/"))
-    if isLocalRootDir:
-      rootDir = re.sub("^file:///|/", "", rootDir, count=1)
-      rootDir = "file://" + os.path.join(mountpoints[0], rootDir)
-    tmpDir = re.sub("^file:///|/", "", tmpDir, count=1)
-    if len(mountpoints) > 1 and isLocalRootDir:
-      tmpDir = os.path.join(mountpoints[1], tmpDir)
-    else:
-      tmpDir = os.path.join(mountpoints[0], tmpDir)
-    putAmsHbaseSiteProperty("hbase.tmp.dir", tmpDir)
-
-    if operatingMode == "distributed":
-      putAmsHbaseSiteProperty("hbase.rootdir", "/user/ams/hbase")
-
-    if operatingMode == "embedded":
-      if isLocalRootDir:
-        putAmsHbaseSiteProperty("hbase.rootdir", rootDir)
-      else:
-        putAmsHbaseSiteProperty("hbase.rootdir", "file:///var/lib/ambari-metrics-collector/hbase")
-
-    collector_heapsize, hbase_heapsize, total_sinks_count = self.getAmsMemoryRecommendation(services, hosts)
-
-    putAmsEnvProperty("metrics_collector_heapsize", collector_heapsize)
-
-    putAmsSiteProperty("timeline.metrics.cache.size", max(100, int(log(total_sinks_count)) * 100))
-    putAmsSiteProperty("timeline.metrics.cache.commit.interval", min(10, max(12 - int(log(total_sinks_count)), 2)))
-
-    # blockCache = 0.3, memstore = 0.35, phoenix-server = 0.15, phoenix-client = 0.25
-    putAmsHbaseSiteProperty("hfile.block.cache.size", 0.3)
-    putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 134217728)
-    putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", 0.35)
-    putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.lowerLimit", 0.3)
-
-    if len(amsCollectorHosts) > 1:
-      pass
-    else:
-      # blockCache = 0.3, memstore = 0.3, phoenix-server = 0.2, phoenix-client = 0.3
-      if total_sinks_count >= 2000:
-        putAmsHbaseSiteProperty("hbase.regionserver.handler.count", 60)
-        putAmsHbaseSiteProperty("hbase.regionserver.hlog.blocksize", 134217728)
-        putAmsHbaseSiteProperty("hbase.regionserver.maxlogs", 64)
-        putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 268435456)
-        putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", 0.3)
-        putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.lowerLimit", 0.25)
-        putAmsHbaseSiteProperty("phoenix.query.maxGlobalMemoryPercentage", 20)
-        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 81920000)
-        putAmsSiteProperty("phoenix.query.maxGlobalMemoryPercentage", 30)
-        putAmsSiteProperty("timeline.metrics.service.resultset.fetchSize", 10000)
-      elif total_sinks_count >= 1000:
-        putAmsHbaseSiteProperty("hbase.regionserver.handler.count", 60)
-        putAmsHbaseSiteProperty("hbase.regionserver.hlog.blocksize", 134217728)
-        putAmsHbaseSiteProperty("hbase.regionserver.maxlogs", 64)
-        putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 268435456)
-        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 40960000)
-        putAmsSiteProperty("timeline.metrics.service.resultset.fetchSize", 5000)
-      else:
-        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 20480000)
-      pass
-
-    metrics_api_handlers = min(50, max(20, int(total_sinks_count / 100)))
-    putAmsSiteProperty("timeline.metrics.service.handler.thread.count", metrics_api_handlers)
-
-    # Distributed mode heap size
-    if operatingMode == "distributed":
-      hbase_heapsize = max(hbase_heapsize, 1024)
-      putHbaseEnvProperty("hbase_master_heapsize", "512")
-      putHbaseEnvProperty("hbase_master_xmn_size", "102") #20% of 512 heap size
-      putHbaseEnvProperty("hbase_regionserver_heapsize", hbase_heapsize)
-      putHbaseEnvProperty("regionserver_xmn_size", round_to_n(0.15 * hbase_heapsize,64))
-    else:
-      # Embedded mode heap size : master + regionserver
-      hbase_rs_heapsize = 512
-      putHbaseEnvProperty("hbase_regionserver_heapsize", hbase_rs_heapsize)
-      putHbaseEnvProperty("hbase_master_heapsize", hbase_heapsize)
-      putHbaseEnvProperty("hbase_master_xmn_size", round_to_n(0.15*(hbase_heapsize + hbase_rs_heapsize),64))
-
-    # If no local DN in distributed mode
-    if operatingMode == "distributed":
-      dn_hosts = self.getComponentHostNames(services, "HDFS", "DATANODE")
-      # call by Kerberos wizard sends only the service being affected
-      # so it is possible for dn_hosts to be None but not amsCollectorHosts
-      if dn_hosts and len(dn_hosts) > 0:
-        if set(amsCollectorHosts).intersection(dn_hosts):
-          collector_cohosted_with_dn = "true"
-        else:
-          collector_cohosted_with_dn = "false"
-        putAmsHbaseSiteProperty("dfs.client.read.shortcircuit", collector_cohosted_with_dn)
-
-    #split points
-    scriptDir = os.path.dirname(os.path.abspath(__file__))
-    metricsDir = os.path.join(scriptDir, '../../../../common-services/AMBARI_METRICS/0.1.0/package')
-    serviceMetricsDir = os.path.join(metricsDir, 'files', 'service-metrics')
-    customServiceMetricsDir = os.path.join(scriptDir, '../../../../dashboards/service-metrics')
-    sys.path.append(os.path.join(metricsDir, 'scripts'))
-    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-
-    from split_points import FindSplitPointsForAMSRegions
-
-    ams_hbase_site = None
-    ams_hbase_env = None
-
-    # Overridden properties from the UI
-    if "ams-hbase-site" in services["configurations"]:
-      ams_hbase_site = services["configurations"]["ams-hbase-site"]["properties"]
-    if "ams-hbase-env" in services["configurations"]:
-       ams_hbase_env = services["configurations"]["ams-hbase-env"]["properties"]
-
-    # Recommendations
-    if not ams_hbase_site:
-      ams_hbase_site = configurations["ams-hbase-site"]["properties"]
-    if not ams_hbase_env:
-      ams_hbase_env = configurations["ams-hbase-env"]["properties"]
-
-    split_point_finder = FindSplitPointsForAMSRegions(
-      ams_hbase_site, ams_hbase_env, serviceMetricsDir, customServiceMetricsDir, operatingMode, servicesList)
-
-    result = split_point_finder.get_split_points()
-    precision_splits = ' '
-    aggregate_splits = ' '
-    if result.precision:
-      precision_splits = result.precision
-    if result.aggregate:
-      aggregate_splits = result.aggregate
-    putAmsSiteProperty("timeline.metrics.host.aggregate.splitpoints", ','.join(precision_splits))
-    putAmsSiteProperty("timeline.metrics.cluster.aggregate.splitpoints", ','.join(aggregate_splits))
-
-    component_grafana_exists = False
-    for service in services['services']:
-      if 'components' in service:
-        for component in service['components']:
-          if 'StackServiceComponents' in component:
-            # If Grafana is installed the hostnames would indicate its location
-            if 'METRICS_GRAFANA' in component['StackServiceComponents']['component_name'] and\
-              len(component['StackServiceComponents']['hostnames']) != 0:
-              component_grafana_exists = True
-              break
-    pass
-
-    if not component_grafana_exists:
-      putGrafanaPropertyAttribute("metrics_grafana_password", "visible", "false")
-
-    pass
-
-
 
   def getServiceConfigurationValidators(self):
     return {
@@ -797,12 +526,7 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       "YARN": {"yarn-site": self.validateYARNConfigurations,
                "yarn-env": self.validateYARNEnvConfigurations},
       "HBASE": {"hbase-env": self.validateHbaseEnvConfigurations},
-      "STORM": {"storm-site": self.validateStormConfigurations},
-      "AMBARI_METRICS": {"ams-hbase-site": self.validateAmsHbaseSiteConfigurations,
-              "ams-hbase-env": self.validateAmsHbaseEnvConfigurations,
-              "ams-site": self.validateAmsSiteConfigurations,
-              "ams-env": self.validateAmsEnvConfigurations,
-               "ams-grafana-env": self.validateGrafanaEnvConfigurations}
+      "STORM": {"storm-site": self.validateStormConfigurations}
     }
 
   def validateMinMax(self, items, recommendedDefaults, configurations):
@@ -834,148 +558,7 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       items.extend(self.toConfigurationValidationProblems(validationItems, configName))
     pass
 
-  def validateAmsSiteConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-    validationItems = []
 
-    op_mode = properties.get("timeline.metrics.service.operation.mode")
-    correct_op_mode_item = None
-    if op_mode not in ("embedded", "distributed"):
-      correct_op_mode_item = self.getErrorItem("Correct value should be set.")
-      pass
-    elif len(self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")) > 1 and op_mode != 'distributed':
-      correct_op_mode_item = self.getErrorItem("Correct value should be 'distributed' for clusters with more then 1 Metrics collector")
-    elif op_mode == 'embedded':
-      collector_heapsize, hbase_heapsize, total_sinks_count = self.getAmsMemoryRecommendation(services, hosts)
-      if total_sinks_count > 1000:
-        correct_op_mode_item = self.getWarnItem("Number of sinks writing metrics to collector is expected to be more than 1000. "
-                                                "'Embedded' mode AMS might not be able to handle the load. Consider moving to distributed mode.")
-
-    validationItems.extend([{"config-name":'timeline.metrics.service.operation.mode', "item": correct_op_mode_item }])
-    return self.toConfigurationValidationProblems(validationItems, "ams-site")
-
-  def validateGrafanaEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-    validationItems = []
-
-    grafana_pwd = properties.get("metrics_grafana_password")
-    grafana_pwd_length_item = None
-    if len(grafana_pwd) < 4:
-      grafana_pwd_length_item = self.getErrorItem("Grafana password length should be at least 4.")
-      pass
-    validationItems.extend([{"config-name":'metrics_grafana_password', "item": grafana_pwd_length_item }])
-    return self.toConfigurationValidationProblems(validationItems, "ams-site")
-
-  def validateAmsHbaseSiteConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-
-    amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
-    ams_site = getSiteProperties(configurations, "ams-site")
-    core_site = getSiteProperties(configurations, "core-site")
-
-    collector_heapsize, hbase_heapsize, total_sinks_count = self.getAmsMemoryRecommendation(services, hosts)
-    recommendedDiskSpace = 10485760
-    # TODO validate configuration for multiple AMBARI_METRICS collectors
-    if len(amsCollectorHosts) > 1:
-      pass
-    else:
-      if total_sinks_count > 2000:
-        recommendedDiskSpace  = 104857600  # * 1k == 100 Gb
-      elif total_sinks_count > 500:
-        recommendedDiskSpace  = 52428800  # * 1k == 50 Gb
-      elif total_sinks_count > 250:
-        recommendedDiskSpace  = 20971520  # * 1k == 20 Gb
-
-    validationItems = []
-
-    rootdir_item = None
-    op_mode = ams_site.get("timeline.metrics.service.operation.mode")
-    default_fs = core_site.get("fs.defaultFS") if core_site else "file:///"
-    hbase_rootdir = properties.get("hbase.rootdir")
-    hbase_tmpdir = properties.get("hbase.tmp.dir")
-    distributed = properties.get("hbase.cluster.distributed")
-    is_local_root_dir = hbase_rootdir.startswith("file://") or (default_fs.startswith("file://") and hbase_rootdir.startswith("/"))
-
-    if op_mode == "distributed" and is_local_root_dir:
-      rootdir_item = self.getWarnItem("In distributed mode hbase.rootdir should point to HDFS.")
-    elif op_mode == "embedded":
-      if distributed.lower() == "false" and hbase_rootdir.startswith('/') or hbase_rootdir.startswith("hdfs://"):
-        rootdir_item = self.getWarnItem("In embedded mode hbase.rootdir cannot point to schemaless values or HDFS, "
-                                        "Example - file:// for localFS")
-      pass
-
-    distributed_item = None
-    if op_mode == "distributed" and not distributed.lower() == "true":
-      distributed_item = self.getErrorItem("hbase.cluster.distributed property should be set to true for "
-                                           "distributed mode")
-    if op_mode == "embedded" and distributed.lower() == "true":
-      distributed_item = self.getErrorItem("hbase.cluster.distributed property should be set to false for embedded mode")
-
-    hbase_zk_client_port = properties.get("hbase.zookeeper.property.clientPort")
-    zkPort = self.getZKPort(services)
-    hbase_zk_client_port_item = None
-    if distributed.lower() == "true" and op_mode == "distributed" and \
-        hbase_zk_client_port != zkPort and hbase_zk_client_port != "{{zookeeper_clientPort}}":
-      hbase_zk_client_port_item = self.getErrorItem("In AMS distributed mode, hbase.zookeeper.property.clientPort "
-                                                    "should be the cluster zookeeper server port : {0}".format(zkPort))
-
-    if distributed.lower() == "false" and op_mode == "embedded" and \
-        hbase_zk_client_port == zkPort and hbase_zk_client_port != "{{zookeeper_clientPort}}":
-      hbase_zk_client_port_item = self.getErrorItem("In AMS embedded mode, hbase.zookeeper.property.clientPort "
-                                                    "should be a different port than cluster zookeeper port."
-                                                    "(default:61181)")
-
-    validationItems.extend([{"config-name":'hbase.rootdir', "item": rootdir_item },
-                            {"config-name":'hbase.cluster.distributed', "item": distributed_item },
-                            {"config-name":'hbase.zookeeper.property.clientPort', "item": hbase_zk_client_port_item }])
-
-    for collectorHostName in amsCollectorHosts:
-      for host in hosts["items"]:
-        if host["Hosts"]["host_name"] == collectorHostName:
-          if op_mode == 'embedded' or is_local_root_dir:
-            validationItems.extend([{"config-name": 'hbase.rootdir', "item": self.validatorEnoughDiskSpace(properties, 'hbase.rootdir', host["Hosts"], recommendedDiskSpace)}])
-            validationItems.extend([{"config-name": 'hbase.rootdir', "item": self.validatorNotRootFs(properties, recommendedDefaults, 'hbase.rootdir', host["Hosts"])}])
-            validationItems.extend([{"config-name": 'hbase.tmp.dir', "item": self.validatorNotRootFs(properties, recommendedDefaults, 'hbase.tmp.dir', host["Hosts"])}])
-
-          dn_hosts = self.getComponentHostNames(services, "HDFS", "DATANODE")
-          if is_local_root_dir:
-            mountPoints = []
-            for mountPoint in host["Hosts"]["disk_info"]:
-              mountPoints.append(mountPoint["mountpoint"])
-            hbase_rootdir_mountpoint = self.getMountPointForDir(hbase_rootdir, mountPoints)
-            hbase_tmpdir_mountpoint = self.getMountPointForDir(hbase_tmpdir, mountPoints)
-            preferred_mountpoints = self.getPreferredMountPoints(host['Hosts'])
-            # hbase.rootdir and hbase.tmp.dir shouldn't point to the same partition
-            # if multiple preferred_mountpoints exist
-            if hbase_rootdir_mountpoint == hbase_tmpdir_mountpoint and \
-              len(preferred_mountpoints) > 1:
-              item = self.getWarnItem("Consider not using {0} partition for storing metrics temporary data. "
-                                      "{0} partition is already used as hbase.rootdir to store metrics data".format(hbase_tmpdir_mountpoint))
-              validationItems.extend([{"config-name":'hbase.tmp.dir', "item": item}])
-
-            # if METRICS_COLLECTOR is co-hosted with DATANODE
-            # cross-check dfs.datanode.data.dir and hbase.rootdir
-            # they shouldn't share same disk partition IO
-            hdfs_site = getSiteProperties(configurations, "hdfs-site")
-            dfs_datadirs = hdfs_site.get("dfs.datanode.data.dir").split(",") if hdfs_site and "dfs.datanode.data.dir" in hdfs_site else []
-            if dn_hosts and collectorHostName in dn_hosts and ams_site and \
-              dfs_datadirs and len(preferred_mountpoints) > len(dfs_datadirs):
-              for dfs_datadir in dfs_datadirs:
-                dfs_datadir_mountpoint = self.getMountPointForDir(dfs_datadir, mountPoints)
-                if dfs_datadir_mountpoint == hbase_rootdir_mountpoint:
-                  item = self.getWarnItem("Consider not using {0} partition for storing metrics data. "
-                                          "{0} is already used by datanode to store HDFS data".format(hbase_rootdir_mountpoint))
-                  validationItems.extend([{"config-name": 'hbase.rootdir', "item": item}])
-                  break
-          # If no local DN in distributed mode
-          elif collectorHostName not in dn_hosts and distributed.lower() == "true":
-            item = self.getWarnItem("It's recommended to install Datanode component on {0} "
-                                    "to speed up IO operations between HDFS and Metrics "
-                                    "Collector in distributed mode ".format(collectorHostName))
-            validationItems.extend([{"config-name": "hbase.cluster.distributed", "item": item}])
-          # Short circuit read should be enabled in distributed mode
-          # if local DN installed
-          else:
-            validationItems.extend([{"config-name": "dfs.client.read.shortcircuit", "item": self.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "dfs.client.read.shortcircuit")}])
-
-    return self.toConfigurationValidationProblems(validationItems, "ams-hbase-site")
 
   def validateStormConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     validationItems = []
@@ -990,132 +573,9 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
 
     return self.toConfigurationValidationProblems(validationItems, "storm-site")
 
-  def validateAmsHbaseEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-
-    ams_env = getSiteProperties(configurations, "ams-env")
-    amsHbaseSite = getSiteProperties(configurations, "ams-hbase-site")
-    validationItems = []
-    mb = 1024 * 1024
-    gb = 1024 * mb
-
-    regionServerItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "hbase_regionserver_heapsize") ## FIXME if new service added
-    if regionServerItem:
-      validationItems.extend([{"config-name": "hbase_regionserver_heapsize", "item": regionServerItem}])
-
-    hbaseMasterHeapsizeItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "hbase_master_heapsize")
-    if hbaseMasterHeapsizeItem:
-      validationItems.extend([{"config-name": "hbase_master_heapsize", "item": hbaseMasterHeapsizeItem}])
-
-    logDirItem = self.validatorEqualsPropertyItem(properties, "hbase_log_dir", ams_env, "metrics_collector_log_dir")
-    if logDirItem:
-      validationItems.extend([{"config-name": "hbase_log_dir", "item": logDirItem}])
-
-    hbase_master_heapsize = self.to_number(properties["hbase_master_heapsize"])
-    hbase_master_xmn_size = self.to_number(properties["hbase_master_xmn_size"])
-    hbase_regionserver_heapsize = self.to_number(properties["hbase_regionserver_heapsize"])
-    hbase_regionserver_xmn_size = self.to_number(properties["regionserver_xmn_size"])
-
-    # Validate Xmn settings.
-    masterXmnItem = None
-    regionServerXmnItem = None
-    is_hbase_distributed = amsHbaseSite.get("hbase.cluster.distributed").lower() == 'true'
-
-    if is_hbase_distributed:
-
-      if not regionServerItem and hbase_regionserver_heapsize > 32768:
-        regionServerItem = self.getWarnItem("Value is more than the recommended maximum heap size of 32G.")
-        validationItems.extend([{"config-name": "hbase_regionserver_heapsize", "item": regionServerItem}])
-
-      minMasterXmn = 0.12 * hbase_master_heapsize
-      maxMasterXmn = 0.2 * hbase_master_heapsize
-      if hbase_master_xmn_size < minMasterXmn:
-        masterXmnItem = self.getWarnItem("Value is lesser than the recommended minimum Xmn size of {0} "
-                                         "(12% of hbase_master_heapsize)".format(int(ceil(minMasterXmn))))
-
-      if hbase_master_xmn_size > maxMasterXmn:
-        masterXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
-                                         "(20% of hbase_master_heapsize)".format(int(floor(maxMasterXmn))))
-
-      minRegionServerXmn = 0.12 * hbase_regionserver_heapsize
-      maxRegionServerXmn = 0.2 * hbase_regionserver_heapsize
-      if hbase_regionserver_xmn_size < minRegionServerXmn:
-        regionServerXmnItem = self.getWarnItem("Value is lesser than the recommended minimum Xmn size of {0} "
-                                               "(12% of hbase_regionserver_heapsize)"
-                                               .format(int(ceil(minRegionServerXmn))))
-
-      if hbase_regionserver_xmn_size > maxRegionServerXmn:
-        regionServerXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
-                                               "(20% of hbase_regionserver_heapsize)"
-                                               .format(int(floor(maxRegionServerXmn))))
-    else:
-
-      if not hbaseMasterHeapsizeItem and (hbase_master_heapsize + hbase_regionserver_heapsize) > 32768:
-        hbaseMasterHeapsizeItem = self.getWarnItem("Value of Master + Regionserver heapsize is more than the recommended maximum heap size of 32G.")
-        validationItems.extend([{"config-name": "hbase_master_heapsize", "item": hbaseMasterHeapsizeItem}])
-
-      minMasterXmn = 0.12 * (hbase_master_heapsize + hbase_regionserver_heapsize)
-      maxMasterXmn = 0.2 *  (hbase_master_heapsize + hbase_regionserver_heapsize)
-      if hbase_master_xmn_size < minMasterXmn:
-        masterXmnItem = self.getWarnItem("Value is lesser than the recommended minimum Xmn size of {0} "
-                                         "(12% of hbase_master_heapsize + hbase_regionserver_heapsize)"
-                                         .format(int(ceil(minMasterXmn))))
-
-      if hbase_master_xmn_size > maxMasterXmn:
-        masterXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
-                                         "(20% of hbase_master_heapsize + hbase_regionserver_heapsize)"
-                                         .format(int(floor(maxMasterXmn))))
-    if masterXmnItem:
-      validationItems.extend([{"config-name": "hbase_master_xmn_size", "item": masterXmnItem}])
-
-    if regionServerXmnItem:
-      validationItems.extend([{"config-name": "regionserver_xmn_size", "item": regionServerXmnItem}])
-
-    if hbaseMasterHeapsizeItem is None:
-      hostMasterComponents = {}
-
-      for service in services["services"]:
-        for component in service["components"]:
-          if component["StackServiceComponents"]["hostnames"] is not None:
-            for hostName in component["StackServiceComponents"]["hostnames"]:
-              if self.isMasterComponent(component):
-                if hostName not in hostMasterComponents.keys():
-                  hostMasterComponents[hostName] = []
-                hostMasterComponents[hostName].append(component["StackServiceComponents"]["component_name"])
-
-      amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
-      for collectorHostName in amsCollectorHosts:
-        for host in hosts["items"]:
-          if host["Hosts"]["host_name"] == collectorHostName:
-            # AMS Collector co-hosted with other master components in bigger clusters
-            if len(hosts['items']) > 31 and \
-                            len(hostMasterComponents[collectorHostName]) > 2 and \
-                            host["Hosts"]["total_mem"] < 32*mb: # < 32Gb(total_mem in k)
-              masterHostMessage = "Host {0} is used by multiple master components ({1}). " \
-                                  "It is recommended to use a separate host for the " \
-                                  "Ambari Metrics Collector component and ensure " \
-                                  "the host has sufficient memory available."
-
-              hbaseMasterHeapsizeItem = self.getWarnItem(masterHostMessage.format(
-                  collectorHostName, str(", ".join(hostMasterComponents[collectorHostName]))))
-              if hbaseMasterHeapsizeItem:
-                validationItems.extend([{"config-name": "hbase_master_heapsize", "item": hbaseMasterHeapsizeItem}])
-      pass
 
-    return self.toConfigurationValidationProblems(validationItems, "ams-hbase-env")
-
-  def validateAmsEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-
-    validationItems = []
-    collectorHeapsizeDefaultItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "metrics_collector_heapsize")
-    validationItems.extend([{"config-name": "metrics_collector_heapsize", "item": collectorHeapsizeDefaultItem}])
 
-    ams_env = getSiteProperties(configurations, "ams-env")
-    collector_heapsize = self.to_number(ams_env.get("metrics_collector_heapsize"))
-    if collector_heapsize > 32768:
-      collectorHeapsizeMaxItem = self.getWarnItem("Value is more than the recommended maximum heap size of 32G.")
-      validationItems.extend([{"config-name": "metrics_collector_heapsize", "item": collectorHeapsizeMaxItem}])
 
-    return self.toConfigurationValidationProblems(validationItems, "ams-env")
 
   def getMemorySizeRequired(self, services, components, configurations):
     totalMemoryRequired = 512*1024*1024 # 512Mb for OS needs

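For reference, the sizing heuristic removed above (and relocated to the AMS
service advisor, as seen in the call to serviceAdvisor.getAmsMemoryRecommendation
earlier) adds 50 MB of HBase heap per master-type sink and 10 MB per
slave-type sink on top of a 500 MB floor, derives the collector heap as a
third of that total once it passes 2 GB (512 MB otherwise), and caps the
HBase heap at 32 GB. A standalone sketch; round_to_n is not shown in this
patch, so a round-to-nearest-multiple definition is assumed:

    def round_to_n(mem_size, n=128):
        # assumed behavior: round to the nearest multiple of n (MB)
        return int(round(mem_size / float(n))) * n

    def ams_memory_recommendation(master_sinks, slave_sinks):
        # heap sizes in MB, mirroring getAmsMemoryRecommendation above
        hbase_heapsize = 500 + master_sinks * 50 + slave_sinks * 10
        collector_heapsize = int(hbase_heapsize / 3) if hbase_heapsize > 2048 else 512
        hbase_heapsize = min(hbase_heapsize, 32768)
        return round_to_n(collector_heapsize), round_to_n(hbase_heapsize)

    # ams_memory_recommendation(10, 100) -> (512, 2048)
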
http://git-wip-us.apache.org/repos/asf/ambari/blob/0f32765d/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index 2dc1738..4cb0d9e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -91,7 +91,6 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
       "HBASE": self.recommendHBASEConfigurations,
       "MAPREDUCE2": self.recommendMapReduce2Configurations,
       "TEZ": self.recommendTezConfigurations,
-      "AMBARI_METRICS": self.recommendAmsConfigurations,
       "YARN": self.recommendYARNConfigurations,
       "STORM": self.recommendStormConfigurations,
       "KNOX": self.recommendKnoxConfigurations,