Posted to commits@ambari.apache.org by sw...@apache.org on 2016/04/26 01:42:36 UTC

[1/2] ambari git commit: HiveServerInteractive. (1). AMBARI-16078. Config calculations used by LLAP app. (2). AMBARI-16086. Hide 'llap' queue capacity slider when 'llap' queue is not being used for LLAP app. (3). AMBARI-16087. LLAP queue is not selected

Repository: ambari
Updated Branches:
  refs/heads/trunk b9bbb6a5f -> 29179e766


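For orientation before the diff: the hunks below add request fixtures (services_13 through services_20, plus "changed-configurations" blocks) to test_recommendYARNConfigurations in TestHDP25StackAdvisor. A minimal sketch of how such a fixture is typically exercised in this test class follows; it is not part of this commit, and self.stackAdvisor, clusterData, and the expected dictionary names are assumptions based on the surrounding test file rather than lines from this diff.

  # Hedged sketch only -- not taken from this commit.
  # services_13 and hosts are fixtures added/used in the diff below;
  # self.stackAdvisor and clusterData are assumed from the test's setUp.
  configurations = {}
  clusterData = {}  # cluster sizing summary normally built by the test harness; contents omitted here
  self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services_13, hosts)
  self.assertEquals(configurations['capacity-scheduler']['properties'],
                    expected_capacity_scheduler_llap_queue_size_40['properties'])

The pattern is the same for the other fixtures: build a services dict describing the cluster topology and current configs, run the advisor, and compare the recommended capacity-scheduler, hive-interactive-site, and hive-interactive-env outputs against expected values.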
http://git-wip-us.apache.org/repos/asf/ambari/blob/29179e76/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
index ea9a924..fe5aee2 100644
--- a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
@@ -22,7 +22,6 @@ import socket
 from unittest import TestCase
 from mock.mock import patch, MagicMock
 
-
 class TestHDP25StackAdvisor(TestCase):
 
   def setUp(self):
@@ -206,6 +205,8 @@ class TestHDP25StackAdvisor(TestCase):
     self.assertEquals(res, res_expected)
     pass
 
+
+
   def test_recommendYARNConfigurations(self):
     ################ Setting up Inputs. #########################
     hosts = {
@@ -292,6 +293,13 @@ class TestHDP25StackAdvisor(TestCase):
           ]
         }
       ],
+      "changed-configurations": [
+        {
+          u'old_value': u'',
+          u'type': u'',
+          u'name': u''
+        }
+      ],
       "configurations": {
         "capacity-scheduler": {
           "properties": {
@@ -317,6 +325,12 @@ class TestHDP25StackAdvisor(TestCase):
               'llap_queue_capacity':'0'
             }
         },
+        "hive-interactive-site":
+          {
+            'properties': {
+              'hive.llap.daemon.queue.name':'default'
+            }
+          },
         "hive-env":
           {
             'properties': {
@@ -380,6 +394,13 @@ class TestHDP25StackAdvisor(TestCase):
         ]
       }
       ],
+      "changed-configurations": [
+        {
+          u'old_value': u'',
+          u'type': u'',
+          u'name': u''
+        }
+      ],
       "configurations": {
         "capacity-scheduler": {
           "properties": {
@@ -468,6 +489,13 @@ class TestHDP25StackAdvisor(TestCase):
         ]
       }
       ],
+      "changed-configurations": [
+        {
+          u'old_value': u'',
+          u'type': u'',
+          u'name': u''
+        }
+      ],
       "configurations": {
         "capacity-scheduler": {
           "properties": {
@@ -493,6 +521,12 @@ class TestHDP25StackAdvisor(TestCase):
               'llap_queue_capacity':'40'
             }
           },
+        "hive-interactive-site":
+          {
+            'properties': {
+              'hive.llap.daemon.queue.name':'default'
+            }
+          },
         "hive-env":
           {
             'properties': {
@@ -556,6 +590,13 @@ class TestHDP25StackAdvisor(TestCase):
         ]
       }
       ],
+      "changed-configurations": [
+        {
+          u'old_value': u'',
+          u'type': u'',
+          u'name': u''
+        }
+      ],
       "configurations": {
         "capacity-scheduler": {
           "properties": {
@@ -589,6 +630,12 @@ class TestHDP25StackAdvisor(TestCase):
               'enable_hive_interactive': 'false'
             }
           },
+        "hive-interactive-site":
+          {
+            'properties': {
+              'hive.llap.daemon.queue.name':'default'
+            }
+          },
         "hive-env":
           {
             'properties': {
@@ -652,6 +699,13 @@ class TestHDP25StackAdvisor(TestCase):
         ]
       }
       ],
+      "changed-configurations": [
+        {
+          u'old_value': u'0',
+          u'type': u'hive-interactive-env',
+          u'name': u'llap_queue_capacity'
+        }
+      ],
       "configurations": {
         "capacity-scheduler": {
           "properties": {
@@ -687,12 +741,24 @@ class TestHDP25StackAdvisor(TestCase):
               'llap_queue_capacity':'0'
             }
           },
+        "hive-interactive-site":
+          {
+            'properties': {
+              'hive.llap.daemon.queue.name': 'llap'
+            }
+          },
         "hive-env":
           {
             'properties': {
               'hive_user': 'hive'
             }
+          },
+        "yarn-site": {
+          "properties": {
+            "yarn.scheduler.minimum-allocation-mb": "682",
+            "yarn.nodemanager.resource.memory-mb": "2048"
           }
+        }
       }
     }
 
@@ -750,6 +816,13 @@ class TestHDP25StackAdvisor(TestCase):
         ]
       }
       ],
+      "changed-configurations": [
+        {
+          u'old_value': u'',
+          u'type': u'',
+          u'name': u''
+        }
+      ],
       "configurations": {
         "capacity-scheduler": {
           "properties": {
@@ -872,6 +945,13 @@ class TestHDP25StackAdvisor(TestCase):
         ]
       }
       ],
+      "changed-configurations": [
+        {
+          u'old_value': u'',
+          u'type': u'',
+          u'name': u''
+        }
+      ],
       "configurations": {
         "capacity-scheduler": {
           "properties": {
@@ -946,6 +1026,13 @@ class TestHDP25StackAdvisor(TestCase):
         ]
       }
       ],
+      "changed-configurations": [
+        {
+          u'old_value': u'',
+          u'type': u'',
+          u'name': u''
+        }
+      ],
       "configurations": {
         "capacity-scheduler": {
           "properties": {
@@ -1024,6 +1111,13 @@ class TestHDP25StackAdvisor(TestCase):
         ]
       }
       ],
+      "changed-configurations": [
+        {
+          u'old_value': u'',
+          u'type': u'',
+          u'name': u''
+        }
+      ],
       "configurations": {
         "capacity-scheduler": {
           "properties": {
@@ -1120,6 +1214,13 @@ class TestHDP25StackAdvisor(TestCase):
         ]
       }
       ],
+      "changed-configurations": [
+        {
+          u'old_value': u'off',
+          u'type': u'hive-interactive-env',
+          u'name': u'enable_hive_interactive'
+        }
+      ],
       "configurations": {
         "capacity-scheduler": {
           "properties": {
@@ -1154,6 +1255,26 @@ class TestHDP25StackAdvisor(TestCase):
               'llap_queue_capacity':'0'
             }
           },
+        "yarn-site": {
+          "properties": {
+            "yarn.scheduler.minimum-allocation-mb": "341",
+            "yarn.nodemanager.resource.memory-mb": "20000",
+            "yarn.nodemanager.resource.cpu-vcores": '1'
+          }
+        },
+        "hive-interactive-site":
+          {
+            'properties': {
+              'hive.llap.daemon.queue.name':'llap',
+              'hive.server2.tez.sessions.per.default.queue' : '1',
+              'hive.tez.container.size': '341'
+            }
+          },
+        "tez-interactive-site": {
+          "properties": {
+            "tez.am.resource.memory.mb": "341"
+          }
+        },
         "hive-env":
           {
             'properties': {
@@ -1216,6 +1337,13 @@ class TestHDP25StackAdvisor(TestCase):
         ]
       }
       ],
+      "changed-configurations": [
+        {
+          u'old_value': u'',
+          u'type': u'',
+          u'name': u''
+        }
+      ],
       "configurations": {
         "capacity-scheduler": {
           "properties": {
@@ -1250,6 +1378,12 @@ class TestHDP25StackAdvisor(TestCase):
               'llap_queue_capacity':'40'
             }
           },
+        "hive-interactive-site":
+          {
+            'properties': {
+              'hive.llap.daemon.queue.name':'llap'
+            }
+          },
         "hive-env":
           {
             'properties': {
@@ -1315,6 +1449,13 @@ class TestHDP25StackAdvisor(TestCase):
         ]
       }
       ],
+      "changed-configurations": [
+        {
+          u'old_value': u'',
+          u'type': u'',
+          u'name': u''
+        }
+      ],
       "configurations": {
         "capacity-scheduler": {
           "properties": {
@@ -1385,85 +1526,1247 @@ class TestHDP25StackAdvisor(TestCase):
     }
 
 
-
-
-    # Expected config outputs.
-
-    # Expected capacity-scheduler with 'llap' (size:20) and 'default' queue at root level.
-    expected_capacity_scheduler_llap_queue_size_20 = {
-      "properties": {
-        "capacity-scheduler": 'yarn.scheduler.capacity.root.default.maximum-capacity=80\n'
-                              'yarn.scheduler.capacity.root.accessible-node-labels=*\n'
-                              'yarn.scheduler.capacity.root.capacity=100\n'
-                              'yarn.scheduler.capacity.root.queues=default,llap\n'
-                              'yarn.scheduler.capacity.maximum-applications=10000\n'
-                              'yarn.scheduler.capacity.root.default.user-limit-factor=1\n'
-                              'yarn.scheduler.capacity.root.default.state=RUNNING\n'
-                              'yarn.scheduler.capacity.maximum-am-resource-percent=1\n'
-                              'yarn.scheduler.capacity.root.default.acl_submit_applications=*\n'
-                              'yarn.scheduler.capacity.root.default.capacity=80\n'
-                              'yarn.scheduler.capacity.root.acl_administer_queue=*\n'
-                              'yarn.scheduler.capacity.node-locality-delay=40\n'
-                              'yarn.scheduler.capacity.queue-mappings-override.enable=false\n'
-                              'yarn.scheduler.capacity.root.llap.user-limit-factor=1\n'
-                              'yarn.scheduler.capacity.root.llap.state=RUNNING\n'
-                              'yarn.scheduler.capacity.root.llap.ordering-policy=fifo\n'
-                              'yarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\n'
-                              'yarn.scheduler.capacity.root.llap.maximum-capacity=20\n'
-                              'yarn.scheduler.capacity.root.llap.capacity=20\n'
-                              'yarn.scheduler.capacity.root.llap.acl_submit_applications=hive\n'
-                              'yarn.scheduler.capacity.root.llap.acl_administer_queue=hive\n'
-                              'yarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'
-
+    # Services 13: YARN service with : (1). 'capacity scheduler' having 'llap' and 'default' queue at root level and
+    # (2). 'enable_hive_interactive' is ON and (3). configuration change detected for 'llap_queue_capacity'
+    services_13 = {
+      "services": [{
+        "StackServices": {
+          "service_name": "YARN",
+        },
+        "Versions": {
+          "stack_version": "2.5"
+        },
+        "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "NODEMANAGER",
+              "hostnames": ["c6401.ambari.apache.org"]
+            }
+          }
+        ]
+      }, {
+        "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE",
+        "StackServices": {
+          "service_name": "HIVE",
+          "service_version": "1.2.1.2.5",
+          "stack_name": "HDP",
+          "stack_version": "2.5"
+        },
+        "components": [
+          {
+            "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE/components/HIVE_SERVER_INTERACTIVE",
+            "StackServiceComponents": {
+              "advertise_version": "true",
+              "bulk_commands_display_name": "",
+              "bulk_commands_master_component_name": "",
+              "cardinality": "0-1",
+              "component_category": "MASTER",
+              "component_name": "HIVE_SERVER_INTERACTIVE",
+              "custom_commands": ["RESTART_LLAP"],
+              "decommission_allowed": "false",
+              "display_name": "HiveServer2 Interactive",
+              "has_bulk_commands_definition": "false",
+              "is_client": "false",
+              "is_master": "true",
+              "reassign_allowed": "false",
+              "recovery_enabled": "false",
+              "service_name": "HIVE",
+              "stack_name": "HDP",
+              "stack_version": "2.5",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+            "dependencies": []
+          },
+          {
+            "StackServiceComponents": {
+              "advertise_version": "true",
+              "cardinality": "1+",
+              "component_category": "SLAVE",
+              "component_name": "NODEMANAGER",
+              "display_name": "NodeManager",
+              "is_client": "false",
+              "is_master": "false",
+              "hostnames": [
+                "c6403.ambari.apache.org"
+              ]
+            },
+            "dependencies": []
+          },
+        ]
       }
-    }
+      ],
+      "changed-configurations": [
+        {
+          u'old_value': u'0',
+          u'type': u'hive-interactive-env',
+          u'name': u'llap_queue_capacity'
+        }
+      ],
+      "configurations": {
+        "capacity-scheduler": {
+          "properties": {
+            "capacity-scheduler": 'yarn.scheduler.capacity.root.default.maximum-capacity=60\n'
+                                  'yarn.scheduler.capacity.root.accessible-node-labels=*\n'
+                                  'yarn.scheduler.capacity.root.capacity=100\n'
+                                  'yarn.scheduler.capacity.root.queues=default,llap\n'
+                                  'yarn.scheduler.capacity.maximum-applications=10000\n'
+                                  'yarn.scheduler.capacity.root.default.user-limit-factor=1\n'
+                                  'yarn.scheduler.capacity.root.default.state=RUNNING\n'
+                                  'yarn.scheduler.capacity.maximum-am-resource-percent=1\n'
+                                  'yarn.scheduler.capacity.root.default.acl_submit_applications=*\n'
+                                  'yarn.scheduler.capacity.root.default.capacity=60\n'
+                                  'yarn.scheduler.capacity.root.acl_administer_queue=*\n'
+                                  'yarn.scheduler.capacity.node-locality-delay=40\n'
+                                  'yarn.scheduler.capacity.queue-mappings-override.enable=false\n'
+                                  'yarn.scheduler.capacity.root.llap.user-limit-factor=1\n'
+                                  'yarn.scheduler.capacity.root.llap.state=RUNNING\n'
+                                  'yarn.scheduler.capacity.root.llap.ordering-policy=fifo\n'
+                                  'yarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\n'
+                                  'yarn.scheduler.capacity.root.llap.maximum-capacity=40\n'
+                                  'yarn.scheduler.capacity.root.llap.capacity=40\n'
+                                  'yarn.scheduler.capacity.root.llap.acl_submit_applications=hive\n'
+                                  'yarn.scheduler.capacity.root.llap.acl_administer_queue=hive\n'
+                                  'yarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'
 
-    # Expected capacity-scheduler with 'llap' (size:40) and 'default' queue at root level.
-    expected_capacity_scheduler_llap_queue_size_40 = {
-      "properties": {
-        "capacity-scheduler": 'yarn.scheduler.capacity.root.default.maximum-capacity=60\n'
-                              'yarn.scheduler.capacity.root.accessible-node-labels=*\n'
-                              'yarn.scheduler.capacity.root.capacity=100\n'
-                              'yarn.scheduler.capacity.root.queues=default,llap\n'
-                              'yarn.scheduler.capacity.maximum-applications=10000\n'
-                              'yarn.scheduler.capacity.root.default.user-limit-factor=1\n'
-                              'yarn.scheduler.capacity.root.default.state=RUNNING\n'
-                              'yarn.scheduler.capacity.maximum-am-resource-percent=1\n'
-                              'yarn.scheduler.capacity.root.default.acl_submit_applications=*\n'
-                              'yarn.scheduler.capacity.root.default.capacity=60\n'
-                              'yarn.scheduler.capacity.root.acl_administer_queue=*\n'
-                              'yarn.scheduler.capacity.node-locality-delay=40\n'
-                              'yarn.scheduler.capacity.queue-mappings-override.enable=false\n'
-                              'yarn.scheduler.capacity.root.llap.user-limit-factor=1\n'
-                              'yarn.scheduler.capacity.root.llap.state=RUNNING\n'
-                              'yarn.scheduler.capacity.root.llap.ordering-policy=fifo\n'
-                              'yarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\n'
-                              'yarn.scheduler.capacity.root.llap.maximum-capacity=40\n'
-                              'yarn.scheduler.capacity.root.llap.capacity=40\n'
-                              'yarn.scheduler.capacity.root.llap.acl_submit_applications=hive\n'
-                              'yarn.scheduler.capacity.root.llap.acl_administer_queue=hive\n'
-                              'yarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'
+          }
+        },
+        "hive-interactive-env":
+          {
+            'properties': {
+              'enable_hive_interactive': 'true',
+              'llap_queue_capacity':'40'
+            }
+          },
+        "hive-interactive-site":
+          {
+            'properties': {
+              'hive.llap.daemon.queue.name': 'llap',
+              'hive.server2.tez.sessions.per.default.queue': '1',
+              'hive.tez.container.size': '341'
 
+            }
+          },
+        "hive-env":
+          {
+            'properties': {
+              'hive_user': 'hive'
+            }
+          },
+        "yarn-site": {
+          "properties": {
+            "yarn.scheduler.minimum-allocation-mb": "341",
+            "yarn.nodemanager.resource.memory-mb": "4096",
+            "yarn.nodemanager.resource.cpu-vcores": '1'
+          }
+        },
+        "tez-interactive-site": {
+          "properties": {
+            "tez.am.resource.memory.mb": "341"
+          }
+        }
       }
     }
 
-    # Expected capacity-scheduler with 'llap' state = STOPPED, cap = 0 % and 'default' queue cap to 100%.
-    expected_capacity_scheduler_llap_Stopped_size_0 = {
-      "properties": {
-        "capacity-scheduler": 'yarn.scheduler.capacity.root.default.maximum-capacity=100\n'
-                              'yarn.scheduler.capacity.root.accessible-node-labels=*\n'
-                              'yarn.scheduler.capacity.root.capacity=100\n'
-                              'yarn.scheduler.capacity.root.queues=default,llap\n'
-                              'yarn.scheduler.capacity.maximum-applications=10000\n'
-                              'yarn.scheduler.capacity.root.default.user-limit-factor=1\n'
-                              'yarn.scheduler.capacity.root.default.state=RUNNING\n'
-                              'yarn.scheduler.capacity.maximum-am-resource-percent=1\n'
-                              'yarn.scheduler.capacity.root.default.acl_submit_applications=*\n'
-                              'yarn.scheduler.capacity.root.default.capacity=100\n'
-                              'yarn.scheduler.capacity.root.acl_administer_queue=*\n'
-                              'yarn.scheduler.capacity.node-locality-delay=40\n'
-                              'yarn.scheduler.capacity.queue-mappings-override.enable=false\n'
-                              'yarn.scheduler.capacity.root.llap.user-limit-factor=1\n'
+
+    # Services 14: YARN service with : (1). 'capacity scheduler' having more than 2 queues and
+    # (2). 'enable_hive_interactive' is ON and (3). configuration change detected for 'llap_queue_capacity'
+    services_14 = {
+      "services": [{
+        "StackServices": {
+          "service_name": "YARN",
+        },
+        "Versions": {
+          "stack_version": "2.5"
+        },
+        "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "NODEMANAGER",
+              "hostnames": ["c6401.ambari.apache.org"]
+            }
+          }
+        ]
+      }, {
+        "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE",
+        "StackServices": {
+          "service_name": "HIVE",
+          "service_version": "1.2.1.2.5",
+          "stack_name": "HDP",
+          "stack_version": "2.5"
+        },
+        "components": [
+          {
+            "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE/components/HIVE_SERVER_INTERACTIVE",
+            "StackServiceComponents": {
+              "advertise_version": "true",
+              "bulk_commands_display_name": "",
+              "bulk_commands_master_component_name": "",
+              "cardinality": "0-1",
+              "component_category": "MASTER",
+              "component_name": "HIVE_SERVER_INTERACTIVE",
+              "custom_commands": ["RESTART_LLAP"],
+              "decommission_allowed": "false",
+              "display_name": "HiveServer2 Interactive",
+              "has_bulk_commands_definition": "false",
+              "is_client": "false",
+              "is_master": "true",
+              "reassign_allowed": "false",
+              "recovery_enabled": "false",
+              "service_name": "HIVE",
+              "stack_name": "HDP",
+              "stack_version": "2.5",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+            "dependencies": []
+          },
+          {
+            "StackServiceComponents": {
+              "advertise_version": "true",
+              "cardinality": "1+",
+              "component_category": "SLAVE",
+              "component_name": "NODEMANAGER",
+              "display_name": "NodeManager",
+              "is_client": "false",
+              "is_master": "false",
+              "hostnames": [
+                "c6403.ambari.apache.org"
+              ]
+            },
+            "dependencies": []
+          },
+        ]
+      }
+      ],
+      "changed-configurations": [
+        {
+          u'old_value': u'0',
+          u'type': u'hive-interactive-env',
+          u'name': u'llap_queue_capacity'
+        }
+      ],
+      "configurations": {
+        "capacity-scheduler": {
+          "properties": {
+            "capacity-scheduler": "yarn.scheduler.capacity.maximum-am-resource-percent=0.2\n"
+                                  "yarn.scheduler.capacity.maximum-applications=10000\n"
+                                  "yarn.scheduler.capacity.node-locality-delay=40\n"
+                                  "yarn.scheduler.capacity.queue-mappings-override.enable=false\n"
+                                  "yarn.scheduler.capacity.resource-calculator=org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator\n"
+                                  "yarn.scheduler.capacity.root.accessible-node-labels=*\n"
+                                  "yarn.scheduler.capacity.root.acl_administer_queue=*\n"
+                                  "yarn.scheduler.capacity.root.capacity=100\n"
+                                  "yarn.scheduler.capacity.root.default.a.a1.acl_administer_queue=*\n"
+                                  "yarn.scheduler.capacity.root.default.a.a1.acl_submit_applications=*\n"
+                                  "yarn.scheduler.capacity.root.default.a.a1.capacity=75\n"
+                                  "yarn.scheduler.capacity.root.default.a.a1.maximum-capacity=100\n"
+                                  "yarn.scheduler.capacity.root.default.a.a1.minimum-user-limit-percent=100\n"
+                                  "yarn.scheduler.capacity.root.default.a.a1.ordering-policy=fifo\n"
+                                  "yarn.scheduler.capacity.root.default.a.a1.state=RUNNING\n"
+                                  "yarn.scheduler.capacity.root.default.a.a1.user-limit-factor=1\n"
+                                  "yarn.scheduler.capacity.root.default.a.llap.acl_administer_queue=*\n"
+                                  "yarn.scheduler.capacity.root.default.a.llap.acl_submit_applications=*\n"
+                                  "yarn.scheduler.capacity.root.default.a.llap.capacity=25\n"
+                                  "yarn.scheduler.capacity.root.default.a.llap.maximum-capacity=25\n"
+                                  "yarn.scheduler.capacity.root.default.a.llap.minimum-user-limit-percent=100\n"
+                                  "yarn.scheduler.capacity.root.default.a.llap.ordering-policy=fifo\n"
+                                  "yarn.scheduler.capacity.root.default.a.llap.state=RUNNING\n"
+                                  "yarn.scheduler.capacity.root.default.a.llap.user-limit-factor=1\n"
+                                  "yarn.scheduler.capacity.root.default.a.acl_administer_queue=*\n"
+                                  "yarn.scheduler.capacity.root.default.a.acl_submit_applications=*\n"
+                                  "yarn.scheduler.capacity.root.default.a.capacity=50\n"
+                                  "yarn.scheduler.capacity.root.default.a.maximum-capacity=100\n"
+                                  "yarn.scheduler.capacity.root.default.a.minimum-user-limit-percent=100\n"
+                                  "yarn.scheduler.capacity.root.default.a.ordering-policy=fifo\n"
+                                  "yarn.scheduler.capacity.root.default.a.queues=a1,llap\n"
+                                  "yarn.scheduler.capacity.root.default.a.state=RUNNING\n"
+                                  "yarn.scheduler.capacity.root.default.a.user-limit-factor=1\n"
+                                  "yarn.scheduler.capacity.root.default.acl_submit_applications=*\n"
+                                  "yarn.scheduler.capacity.root.default.b.acl_administer_queue=*\n"
+                                  "yarn.scheduler.capacity.root.default.b.acl_submit_applications=*\n"
+                                  "yarn.scheduler.capacity.root.default.b.capacity=50\n"
+                                  "yarn.scheduler.capacity.root.default.b.maximum-capacity=50\n"
+                                  "yarn.scheduler.capacity.root.default.b.minimum-user-limit-percent=100\n"
+                                  "yarn.scheduler.capacity.root.default.b.ordering-policy=fifo\n"
+                                  "yarn.scheduler.capacity.root.default.b.state=RUNNING\n"
+                                  "yarn.scheduler.capacity.root.default.b.user-limit-factor=1\n"
+                                  "yarn.scheduler.capacity.root.default.capacity=100\n"
+                                  "yarn.scheduler.capacity.root.default.maximum-capacity=100\n"
+                                  "yarn.scheduler.capacity.root.default.queues=a,b\n"
+                                  "yarn.scheduler.capacity.root.default.state=RUNNING\n"
+                                  "yarn.scheduler.capacity.root.default.user-limit-factor=1\n"
+                                  "yarn.scheduler.capacity.root.queues=default"
+
+          }
+        },
+        "hive-interactive-env":
+          {
+            'properties': {
+              'enable_hive_interactive': 'true',
+              'llap_queue_capacity':'40'
+            }
+          },
+        "hive-interactive-site":
+          {
+            'properties': {
+              'hive.llap.daemon.queue.name': 'llap',
+              'hive.server2.tez.sessions.per.default.queue': '1',
+              'hive.tez.container.size': '341'
+
+            }
+          },
+        "hive-env":
+          {
+            'properties': {
+              'hive_user': 'hive'
+            }
+          },
+        "yarn-site": {
+          "properties": {
+            "yarn.scheduler.minimum-allocation-mb": "341",
+            "yarn.nodemanager.resource.memory-mb": "4096",
+            "yarn.nodemanager.resource.cpu-vcores": '1'
+          }
+        },
+        "tez-interactive-site": {
+          "properties": {
+            "tez.am.resource.memory.mb": "341"
+          }
+        }
+      }
+    }
+
+
+
+    # Services 15: YARN service with : (1). 'capacity scheduler' having 'llap' (state:stopped) and 'default' queue at
+    # root level and (2). 'enable_hive_interactive' is ON and (3). 'hive.llap.daemon.queue.name' == 'default'
+    services_15 = {
+      "services": [{
+        "StackServices": {
+          "service_name": "YARN",
+        },
+        "Versions": {
+          "stack_version": "2.5"
+        },
+        "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "NODEMANAGER",
+              "hostnames": ["c6401.ambari.apache.org"]
+            }
+          }
+        ]
+      }, {
+        "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE",
+        "StackServices": {
+          "service_name": "HIVE",
+          "service_version": "1.2.1.2.5",
+          "stack_name": "HDP",
+          "stack_version": "2.5"
+        },
+        "components": [
+          {
+            "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE/components/HIVE_SERVER_INTERACTIVE",
+            "StackServiceComponents": {
+              "advertise_version": "true",
+              "bulk_commands_display_name": "",
+              "bulk_commands_master_component_name": "",
+              "cardinality": "0-1",
+              "component_category": "MASTER",
+              "component_name": "HIVE_SERVER_INTERACTIVE",
+              "custom_commands": ["RESTART_LLAP"],
+              "decommission_allowed": "false",
+              "display_name": "HiveServer2 Interactive",
+              "has_bulk_commands_definition": "false",
+              "is_client": "false",
+              "is_master": "true",
+              "reassign_allowed": "false",
+              "recovery_enabled": "false",
+              "service_name": "HIVE",
+              "stack_name": "HDP",
+              "stack_version": "2.5",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+            "dependencies": []
+          },
+          {
+            "StackServiceComponents": {
+              "advertise_version": "true",
+              "cardinality": "1+",
+              "component_category": "SLAVE",
+              "component_name": "NODEMANAGER",
+              "display_name": "NodeManager",
+              "is_client": "false",
+              "is_master": "false",
+              "hostnames": [
+                "c6403.ambari.apache.org"
+              ]
+            },
+            "dependencies": []
+          },
+        ]
+      }
+      ],
+      "changed-configurations": [
+        {
+          u'old_value': u'0',
+          u'type': u'hive-interactive-env',
+          u'name': u'llap_queue_capacity'
+        }
+      ],
+      "configurations": {
+        "capacity-scheduler": {
+          "properties": {
+            "capacity-scheduler": 'yarn.scheduler.capacity.root.default.maximum-capacity=60\n'
+                                  'yarn.scheduler.capacity.root.accessible-node-labels=*\n'
+                                  'yarn.scheduler.capacity.root.capacity=100\n'
+                                  'yarn.scheduler.capacity.root.queues=default,llap\n'
+                                  'yarn.scheduler.capacity.maximum-applications=10000\n'
+                                  'yarn.scheduler.capacity.root.default.user-limit-factor=1\n'
+                                  'yarn.scheduler.capacity.root.default.state=RUNNING\n'
+                                  'yarn.scheduler.capacity.maximum-am-resource-percent=1\n'
+                                  'yarn.scheduler.capacity.root.default.acl_submit_applications=*\n'
+                                  'yarn.scheduler.capacity.root.default.capacity=60\n'
+                                  'yarn.scheduler.capacity.root.acl_administer_queue=*\n'
+                                  'yarn.scheduler.capacity.node-locality-delay=40\n'
+                                  'yarn.scheduler.capacity.queue-mappings-override.enable=false\n'
+                                  'yarn.scheduler.capacity.root.llap.user-limit-factor=1\n'
+                                  'yarn.scheduler.capacity.root.llap.state=STOPPED\n'
+                                  'yarn.scheduler.capacity.root.llap.ordering-policy=fifo\n'
+                                  'yarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\n'
+                                  'yarn.scheduler.capacity.root.llap.maximum-capacity=40\n'
+                                  'yarn.scheduler.capacity.root.llap.capacity=40\n'
+                                  'yarn.scheduler.capacity.root.llap.acl_submit_applications=hive\n'
+                                  'yarn.scheduler.capacity.root.llap.acl_administer_queue=hive\n'
+                                  'yarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'
+
+          }
+        },
+        "hive-interactive-env":
+          {
+            'properties': {
+              'enable_hive_interactive': 'true',
+              'llap_queue_capacity':'40'
+            }
+          },
+        "hive-interactive-site":
+          {
+            'properties': {
+              'hive.llap.daemon.queue.name': 'default',
+              'hive.server2.tez.sessions.per.default.queue': '1',
+              'hive.tez.container.size': '341'
+
+            }
+          },
+        "hive-env":
+          {
+            'properties': {
+              'hive_user': 'hive'
+            }
+          },
+        "yarn-site": {
+          "properties": {
+            "yarn.scheduler.minimum-allocation-mb": "341",
+            "yarn.nodemanager.resource.memory-mb": "4096",
+            "yarn.nodemanager.resource.cpu-vcores": '1'
+          }
+        },
+        "tez-interactive-site": {
+          "properties": {
+            "tez.am.resource.memory.mb": "341"
+          }
+        }
+      }
+    }
+
+
+
+    # Services 16: YARN service with : (1). 'capacity scheduler' having 'llap' and 'default' queue at root level and
+    # (2). 'enable_hive_interactive' is ON and (3). configuration change detected for 'enable_hive_interactive'
+    services_16 = {
+      "services": [{
+        "StackServices": {
+          "service_name": "YARN",
+        },
+        "Versions": {
+          "stack_version": "2.5"
+        },
+        "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "NODEMANAGER",
+              "hostnames": ["c6401.ambari.apache.org"]
+            }
+          }
+        ]
+      }, {
+        "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE",
+        "StackServices": {
+          "service_name": "HIVE",
+          "service_version": "1.2.1.2.5",
+          "stack_name": "HDP",
+          "stack_version": "2.5"
+        },
+        "components": [
+          {
+            "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE/components/HIVE_SERVER_INTERACTIVE",
+            "StackServiceComponents": {
+              "advertise_version": "true",
+              "bulk_commands_display_name": "",
+              "bulk_commands_master_component_name": "",
+              "cardinality": "0-1",
+              "component_category": "MASTER",
+              "component_name": "HIVE_SERVER_INTERACTIVE",
+              "custom_commands": ["RESTART_LLAP"],
+              "decommission_allowed": "false",
+              "display_name": "HiveServer2 Interactive",
+              "has_bulk_commands_definition": "false",
+              "is_client": "false",
+              "is_master": "true",
+              "reassign_allowed": "false",
+              "recovery_enabled": "false",
+              "service_name": "HIVE",
+              "stack_name": "HDP",
+              "stack_version": "2.5",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+            "dependencies": []
+          },
+          {
+            "StackServiceComponents": {
+              "advertise_version": "true",
+              "cardinality": "1+",
+              "component_category": "SLAVE",
+              "component_name": "NODEMANAGER",
+              "display_name": "NodeManager",
+              "is_client": "false",
+              "is_master": "false",
+              "hostnames": [
+                "c6403.ambari.apache.org"
+              ]
+            },
+            "dependencies": []
+          },
+        ]
+      }
+      ],
+      "changed-configurations": [
+        {
+          u'old_value': u'true',
+          u'type': u'hive-interactive-env',
+          u'name': u'enable_hive_interactive'
+        }
+      ],
+      "configurations": {
+        "capacity-scheduler": {
+          "properties": {
+            "capacity-scheduler": 'yarn.scheduler.capacity.root.default.maximum-capacity=60\n'
+                                  'yarn.scheduler.capacity.root.accessible-node-labels=*\n'
+                                  'yarn.scheduler.capacity.root.capacity=100\n'
+                                  'yarn.scheduler.capacity.root.queues=default,llap\n'
+                                  'yarn.scheduler.capacity.maximum-applications=10000\n'
+                                  'yarn.scheduler.capacity.root.default.user-limit-factor=1\n'
+                                  'yarn.scheduler.capacity.root.default.state=RUNNING\n'
+                                  'yarn.scheduler.capacity.maximum-am-resource-percent=1\n'
+                                  'yarn.scheduler.capacity.root.default.acl_submit_applications=*\n'
+                                  'yarn.scheduler.capacity.root.default.capacity=60\n'
+                                  'yarn.scheduler.capacity.root.acl_administer_queue=*\n'
+                                  'yarn.scheduler.capacity.node-locality-delay=40\n'
+                                  'yarn.scheduler.capacity.queue-mappings-override.enable=false\n'
+                                  'yarn.scheduler.capacity.root.llap.user-limit-factor=1\n'
+                                  'yarn.scheduler.capacity.root.llap.state=RUNNING\n'
+                                  'yarn.scheduler.capacity.root.llap.ordering-policy=fifo\n'
+                                  'yarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\n'
+                                  'yarn.scheduler.capacity.root.llap.maximum-capacity=40\n'
+                                  'yarn.scheduler.capacity.root.llap.capacity=40\n'
+                                  'yarn.scheduler.capacity.root.llap.acl_submit_applications=hive\n'
+                                  'yarn.scheduler.capacity.root.llap.acl_administer_queue=hive\n'
+                                  'yarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'
+
+          }
+        },
+        "hive-interactive-env":
+          {
+            'properties': {
+              'enable_hive_interactive': 'true',
+              'llap_queue_capacity':'40'
+            }
+          },
+        "hive-interactive-site":
+          {
+            'properties': {
+              'hive.llap.daemon.queue.name': 'llap',
+              'hive.server2.tez.sessions.per.default.queue': '1',
+              'hive.tez.container.size': '341'
+
+            }
+          },
+        "hive-env":
+          {
+            'properties': {
+              'hive_user': 'hive'
+            }
+          },
+        "yarn-site": {
+          "properties": {
+            "yarn.scheduler.minimum-allocation-mb": "341",
+            "yarn.nodemanager.resource.memory-mb": "4096",
+            "yarn.nodemanager.resource.cpu-vcores": '1'
+          }
+        },
+        "tez-interactive-site": {
+          "properties": {
+            "tez.am.resource.memory.mb": "341"
+          }
+        }
+      }
+    }
+
+
+    # Services 17: YARN service with : (1). 'capacity scheduler' having 'llap' and 'default' queue at root level and
+    # (2). 'enable_hive_interactive' is OFF and (3). configuration change detected for 'enable_hive_interactive'
+    services_17 = {
+      "services": [{
+        "StackServices": {
+          "service_name": "YARN",
+        },
+        "Versions": {
+          "stack_version": "2.5"
+        },
+        "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "NODEMANAGER",
+              "hostnames": ["c6401.ambari.apache.org"]
+            }
+          }
+        ]
+      }, {
+        "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE",
+        "StackServices": {
+          "service_name": "HIVE",
+          "service_version": "1.2.1.2.5",
+          "stack_name": "HDP",
+          "stack_version": "2.5"
+        },
+        "components": [
+          {
+            "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE/components/HIVE_SERVER_INTERACTIVE",
+            "StackServiceComponents": {
+              "advertise_version": "true",
+              "bulk_commands_display_name": "",
+              "bulk_commands_master_component_name": "",
+              "cardinality": "0-1",
+              "component_category": "MASTER",
+              "component_name": "HIVE_SERVER_INTERACTIVE",
+              "custom_commands": ["RESTART_LLAP"],
+              "decommission_allowed": "false",
+              "display_name": "HiveServer2 Interactive",
+              "has_bulk_commands_definition": "false",
+              "is_client": "false",
+              "is_master": "true",
+              "reassign_allowed": "false",
+              "recovery_enabled": "false",
+              "service_name": "HIVE",
+              "stack_name": "HDP",
+              "stack_version": "2.5",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+            "dependencies": []
+          },
+          {
+            "StackServiceComponents": {
+              "advertise_version": "true",
+              "cardinality": "1+",
+              "component_category": "SLAVE",
+              "component_name": "NODEMANAGER",
+              "display_name": "NodeManager",
+              "is_client": "false",
+              "is_master": "false",
+              "hostnames": [
+                "c6403.ambari.apache.org"
+              ]
+            },
+            "dependencies": []
+          },
+        ]
+      }
+      ],
+      "changed-configurations": [
+        {
+          u'old_value': u'false',
+          u'type': u'hive-interactive-env',
+          u'name': u'enable_hive_interactive'
+        }
+      ],
+      "configurations": {
+        "capacity-scheduler": {
+          "properties": {
+            "capacity-scheduler": 'yarn.scheduler.capacity.root.default.maximum-capacity=60\n'
+                                  'yarn.scheduler.capacity.root.accessible-node-labels=*\n'
+                                  'yarn.scheduler.capacity.root.capacity=100\n'
+                                  'yarn.scheduler.capacity.root.queues=default,llap\n'
+                                  'yarn.scheduler.capacity.maximum-applications=10000\n'
+                                  'yarn.scheduler.capacity.root.default.user-limit-factor=1\n'
+                                  'yarn.scheduler.capacity.root.default.state=RUNNING\n'
+                                  'yarn.scheduler.capacity.maximum-am-resource-percent=1\n'
+                                  'yarn.scheduler.capacity.root.default.acl_submit_applications=*\n'
+                                  'yarn.scheduler.capacity.root.default.capacity=60\n'
+                                  'yarn.scheduler.capacity.root.acl_administer_queue=*\n'
+                                  'yarn.scheduler.capacity.node-locality-delay=40\n'
+                                  'yarn.scheduler.capacity.queue-mappings-override.enable=false\n'
+                                  'yarn.scheduler.capacity.root.llap.user-limit-factor=1\n'
+                                  'yarn.scheduler.capacity.root.llap.state=RUNNING\n'
+                                  'yarn.scheduler.capacity.root.llap.ordering-policy=fifo\n'
+                                  'yarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\n'
+                                  'yarn.scheduler.capacity.root.llap.maximum-capacity=40\n'
+                                  'yarn.scheduler.capacity.root.llap.capacity=40\n'
+                                  'yarn.scheduler.capacity.root.llap.acl_submit_applications=hive\n'
+                                  'yarn.scheduler.capacity.root.llap.acl_administer_queue=hive\n'
+                                  'yarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'
+
+          }
+        },
+        "hive-interactive-env":
+          {
+            'properties': {
+              'enable_hive_interactive': 'false',
+              'llap_queue_capacity':'40'
+            }
+          },
+        "hive-interactive-site":
+          {
+            'properties': {
+              'hive.llap.daemon.queue.name': 'llap',
+              'hive.server2.tez.sessions.per.default.queue': '1',
+              'hive.tez.container.size': '341'
+
+            }
+          },
+        "hive-env":
+          {
+            'properties': {
+              'hive_user': 'hive'
+            }
+          },
+        "yarn-site": {
+          "properties": {
+            "yarn.scheduler.minimum-allocation-mb": "341",
+            "yarn.nodemanager.resource.memory-mb": "4096",
+            "yarn.nodemanager.resource.cpu-vcores": '1'
+          }
+        },
+        "tez-interactive-site": {
+          "properties": {
+            "tez.am.resource.memory.mb": "341"
+          }
+        }
+      }
+    }
+
+
+
+    # Services 18: YARN service with : (1). 'capacity scheduler' having 'llap' and 'default' queue at root level and
+    # (2). 'enable_hive_interactive' is OFF and (3). configuration change NOT detected for 'enable_hive_interactive'
+    services_18 = {
+      "services": [{
+        "StackServices": {
+          "service_name": "YARN",
+        },
+        "Versions": {
+          "stack_version": "2.5"
+        },
+        "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "NODEMANAGER",
+              "hostnames": ["c6401.ambari.apache.org"]
+            }
+          }
+        ]
+      }, {
+        "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE",
+        "StackServices": {
+          "service_name": "HIVE",
+          "service_version": "1.2.1.2.5",
+          "stack_name": "HDP",
+          "stack_version": "2.5"
+        },
+        "components": [
+          {
+            "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE/components/HIVE_SERVER_INTERACTIVE",
+            "StackServiceComponents": {
+              "advertise_version": "true",
+              "bulk_commands_display_name": "",
+              "bulk_commands_master_component_name": "",
+              "cardinality": "0-1",
+              "component_category": "MASTER",
+              "component_name": "HIVE_SERVER_INTERACTIVE",
+              "custom_commands": ["RESTART_LLAP"],
+              "decommission_allowed": "false",
+              "display_name": "HiveServer2 Interactive",
+              "has_bulk_commands_definition": "false",
+              "is_client": "false",
+              "is_master": "true",
+              "reassign_allowed": "false",
+              "recovery_enabled": "false",
+              "service_name": "HIVE",
+              "stack_name": "HDP",
+              "stack_version": "2.5",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+            "dependencies": []
+          },
+          {
+            "StackServiceComponents": {
+              "advertise_version": "true",
+              "cardinality": "1+",
+              "component_category": "SLAVE",
+              "component_name": "NODEMANAGER",
+              "display_name": "NodeManager",
+              "is_client": "false",
+              "is_master": "false",
+              "hostnames": [
+                "c6403.ambari.apache.org"
+              ]
+            },
+            "dependencies": []
+          },
+        ]
+      }
+      ],
+      "configurations": {
+        "capacity-scheduler": {
+          "properties": {
+            "capacity-scheduler": 'yarn.scheduler.capacity.root.default.maximum-capacity=60\n'
+                                  'yarn.scheduler.capacity.root.accessible-node-labels=*\n'
+                                  'yarn.scheduler.capacity.root.capacity=100\n'
+                                  'yarn.scheduler.capacity.root.queues=default,llap\n'
+                                  'yarn.scheduler.capacity.maximum-applications=10000\n'
+                                  'yarn.scheduler.capacity.root.default.user-limit-factor=1\n'
+                                  'yarn.scheduler.capacity.root.default.state=RUNNING\n'
+                                  'yarn.scheduler.capacity.maximum-am-resource-percent=1\n'
+                                  'yarn.scheduler.capacity.root.default.acl_submit_applications=*\n'
+                                  'yarn.scheduler.capacity.root.default.capacity=60\n'
+                                  'yarn.scheduler.capacity.root.acl_administer_queue=*\n'
+                                  'yarn.scheduler.capacity.node-locality-delay=40\n'
+                                  'yarn.scheduler.capacity.queue-mappings-override.enable=false\n'
+                                  'yarn.scheduler.capacity.root.llap.user-limit-factor=1\n'
+                                  'yarn.scheduler.capacity.root.llap.state=STOPPED\n'
+                                  'yarn.scheduler.capacity.root.llap.ordering-policy=fifo\n'
+                                  'yarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\n'
+                                  'yarn.scheduler.capacity.root.llap.maximum-capacity=40\n'
+                                  'yarn.scheduler.capacity.root.llap.capacity=40\n'
+                                  'yarn.scheduler.capacity.root.llap.acl_submit_applications=hive\n'
+                                  'yarn.scheduler.capacity.root.llap.acl_administer_queue=hive\n'
+                                  'yarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'
+
+          }
+        },
+        "hive-interactive-env":
+          {
+            'properties': {
+              'enable_hive_interactive': 'false',
+              'llap_queue_capacity':'40'
+            }
+          },
+        "hive-interactive-site":
+          {
+            'properties': {
+              'hive.llap.daemon.queue.name': 'llap',
+              'hive.server2.tez.sessions.per.default.queue': '1',
+              'hive.tez.container.size': '341'
+
+            }
+          },
+        "hive-env":
+          {
+            'properties': {
+              'hive_user': 'hive'
+            }
+          },
+        "yarn-site": {
+          "properties": {
+            "yarn.scheduler.minimum-allocation-mb": "341",
+            "yarn.nodemanager.resource.memory-mb": "4096",
+            "yarn.nodemanager.resource.cpu-vcores": '1'
+          }
+        },
+        "tez-interactive-site": {
+          "properties": {
+            "tez.am.resource.memory.mb": "341"
+          }
+        }
+      }
+    }
+
+
+    # Services 19: YARN service with : (1). 'capacity scheduler' having 'llap' and 'default' queue at root level and
+    # (2). 'enable_hive_interactive' is ON and (3). configuration change detected for 'enable_hive_interactive'
+    services_19 = {
+      "services": [{
+        "StackServices": {
+          "service_name": "YARN",
+        },
+        "Versions": {
+          "stack_version": "2.5"
+        },
+        "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "NODEMANAGER",
+              "hostnames": ["c6401.ambari.apache.org"]
+            }
+          }
+        ]
+      }, {
+        "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE",
+        "StackServices": {
+          "service_name": "HIVE",
+          "service_version": "1.2.1.2.5",
+          "stack_name": "HDP",
+          "stack_version": "2.5"
+        },
+        "components": [
+          {
+            "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE/components/HIVE_SERVER_INTERACTIVE",
+            "StackServiceComponents": {
+              "advertise_version": "true",
+              "bulk_commands_display_name": "",
+              "bulk_commands_master_component_name": "",
+              "cardinality": "0-1",
+              "component_category": "MASTER",
+              "component_name": "HIVE_SERVER_INTERACTIVE",
+              "custom_commands": ["RESTART_LLAP"],
+              "decommission_allowed": "false",
+              "display_name": "HiveServer2 Interactive",
+              "has_bulk_commands_definition": "false",
+              "is_client": "false",
+              "is_master": "true",
+              "reassign_allowed": "false",
+              "recovery_enabled": "false",
+              "service_name": "HIVE",
+              "stack_name": "HDP",
+              "stack_version": "2.5",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+            "dependencies": []
+          },
+          {
+            "StackServiceComponents": {
+              "advertise_version": "true",
+              "cardinality": "1+",
+              "component_category": "SLAVE",
+              "component_name": "NODEMANAGER",
+              "display_name": "NodeManager",
+              "is_client": "false",
+              "is_master": "false",
+              "hostnames": [
+                "c6403.ambari.apache.org"
+              ]
+            },
+            "dependencies": []
+          },
+        ]
+      }
+      ],
+      "changed-configurations": [
+        {
+          u'old_value': u'2',
+          u'type': u'hive-interactive-env',
+          u'name': u'num_llap_nodes'
+        }
+      ],
+      "configurations": {
+        "capacity-scheduler": {
+          "properties": {
+            "capacity-scheduler": 'yarn.scheduler.capacity.root.default.maximum-capacity=60\n'
+                                  'yarn.scheduler.capacity.root.accessible-node-labels=*\n'
+                                  'yarn.scheduler.capacity.root.capacity=100\n'
+                                  'yarn.scheduler.capacity.root.queues=default,llap\n'
+                                  'yarn.scheduler.capacity.maximum-applications=10000\n'
+                                  'yarn.scheduler.capacity.root.default.user-limit-factor=1\n'
+                                  'yarn.scheduler.capacity.root.default.state=RUNNING\n'
+                                  'yarn.scheduler.capacity.maximum-am-resource-percent=1\n'
+                                  'yarn.scheduler.capacity.root.default.acl_submit_applications=*\n'
+                                  'yarn.scheduler.capacity.root.default.capacity=60\n'
+                                  'yarn.scheduler.capacity.root.acl_administer_queue=*\n'
+                                  'yarn.scheduler.capacity.node-locality-delay=40\n'
+                                  'yarn.scheduler.capacity.queue-mappings-override.enable=false\n'
+                                  'yarn.scheduler.capacity.root.llap.user-limit-factor=1\n'
+                                  'yarn.scheduler.capacity.root.llap.state=RUNNING\n'
+                                  'yarn.scheduler.capacity.root.llap.ordering-policy=fifo\n'
+                                  'yarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\n'
+                                  'yarn.scheduler.capacity.root.llap.maximum-capacity=40\n'
+                                  'yarn.scheduler.capacity.root.llap.capacity=40\n'
+                                  'yarn.scheduler.capacity.root.llap.acl_submit_applications=hive\n'
+                                  'yarn.scheduler.capacity.root.llap.acl_administer_queue=hive\n'
+                                  'yarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'
+
+          }
+        },
+        "hive-interactive-env":
+          {
+            'properties': {
+              'enable_hive_interactive': 'true',
+              'llap_queue_capacity':'40',
+              'num_llap_nodes': 1
+            }
+          },
+        "hive-interactive-site":
+          {
+            'properties': {
+              'hive.llap.daemon.queue.name': 'llap',
+              'hive.server2.tez.sessions.per.default.queue': '1',
+              'hive.tez.container.size': '341'
+
+            }
+          },
+        "hive-env":
+          {
+            'properties': {
+              'hive_user': 'hive'
+            }
+          },
+        "yarn-site": {
+          "properties": {
+            "yarn.scheduler.minimum-allocation-mb": "341",
+            "yarn.nodemanager.resource.memory-mb": "4096",
+            "yarn.nodemanager.resource.cpu-vcores": '1'
+          }
+        },
+        "tez-interactive-site": {
+          "properties": {
+            "tez.am.resource.memory.mb": "341"
+          }
+        }
+      }
+    }
+
+
+    # Services 20: YARN service with : (1). 'capacity scheduler' having 'llap' and 'default' queue at root level and
+    # (2). 'enable_hive_interactive' is ON and (3). configuration change detected for 'llap_queue_capacity'
+    # yarn.nodemanager.resource.memory-mb = 24576
+    services_20 = {
+      "services": [{
+        "StackServices": {
+          "service_name": "YARN",
+        },
+        "Versions": {
+          "stack_version": "2.5"
+        },
+        "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "NODEMANAGER",
+              "hostnames": ["c6401.ambari.apache.org"]
+            }
+          }
+        ]
+      }, {
+        "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE",
+        "StackServices": {
+          "service_name": "HIVE",
+          "service_version": "1.2.1.2.5",
+          "stack_name": "HDP",
+          "stack_version": "2.5"
+        },
+        "components": [
+          {
+            "href": "/api/v1/stacks/HDP/versions/2.5/services/HIVE/components/HIVE_SERVER_INTERACTIVE",
+            "StackServiceComponents": {
+              "advertise_version": "true",
+              "bulk_commands_display_name": "",
+              "bulk_commands_master_component_name": "",
+              "cardinality": "0-1",
+              "component_category": "MASTER",
+              "component_name": "HIVE_SERVER_INTERACTIVE",
+              "custom_commands": ["RESTART_LLAP"],
+              "decommission_allowed": "false",
+              "display_name": "HiveServer2 Interactive",
+              "has_bulk_commands_definition": "false",
+              "is_client": "false",
+              "is_master": "true",
+              "reassign_allowed": "false",
+              "recovery_enabled": "false",
+              "service_name": "HIVE",
+              "stack_name": "HDP",
+              "stack_version": "2.5",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+            "dependencies": []
+          },
+          {
+            "StackServiceComponents": {
+              "advertise_version": "true",
+              "cardinality": "1+",
+              "component_category": "SLAVE",
+              "component_name": "NODEMANAGER",
+              "display_name": "NodeManager",
+              "is_client": "false",
+              "is_master": "false",
+              "hostnames": [
+                "c6403.ambari.apache.org"
+              ]
+            },
+            "dependencies": []
+          },
+        ]
+      }
+      ],
+      "changed-configurations": [
+        {
+          u'old_value': u'25',
+          u'type': u'hive-interactive-env',
+          u'name': u'llap_queue_capacity'
+        }
+      ],
+      "configurations": {
+        "capacity-scheduler": {
+          "properties": {
+            "capacity-scheduler": 'yarn.scheduler.capacity.root.default.maximum-capacity=60\n'
+                                  'yarn.scheduler.capacity.root.accessible-node-labels=*\n'
+                                  'yarn.scheduler.capacity.root.capacity=100\n'
+                                  'yarn.scheduler.capacity.root.queues=default,llap\n'
+                                  'yarn.scheduler.capacity.maximum-applications=10000\n'
+                                  'yarn.scheduler.capacity.root.default.user-limit-factor=1\n'
+                                  'yarn.scheduler.capacity.root.default.state=RUNNING\n'
+                                  'yarn.scheduler.capacity.maximum-am-resource-percent=1\n'
+                                  'yarn.scheduler.capacity.root.default.acl_submit_applications=*\n'
+                                  'yarn.scheduler.capacity.root.default.capacity=60\n'
+                                  'yarn.scheduler.capacity.root.acl_administer_queue=*\n'
+                                  'yarn.scheduler.capacity.node-locality-delay=40\n'
+                                  'yarn.scheduler.capacity.queue-mappings-override.enable=false\n'
+                                  'yarn.scheduler.capacity.root.llap.user-limit-factor=1\n'
+                                  'yarn.scheduler.capacity.root.llap.state=RUNNING\n'
+                                  'yarn.scheduler.capacity.root.llap.ordering-policy=fifo\n'
+                                  'yarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\n'
+                                  'yarn.scheduler.capacity.root.llap.maximum-capacity=40\n'
+                                  'yarn.scheduler.capacity.root.llap.capacity=40\n'
+                                  'yarn.scheduler.capacity.root.llap.acl_submit_applications=hive\n'
+                                  'yarn.scheduler.capacity.root.llap.acl_administer_queue=hive\n'
+                                  'yarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'
+
+          }
+        },
+        "hive-interactive-env":
+          {
+            'properties': {
+              'enable_hive_interactive': 'true',
+              'llap_queue_capacity':'60'
+            }
+          },
+        "hive-interactive-site":
+          {
+            'properties': {
+              'hive.llap.daemon.queue.name': 'llap',
+              'hive.server2.tez.sessions.per.default.queue': '1',
+              'hive.tez.container.size': '341'
+
+            }
+          },
+        "hive-env":
+          {
+            'properties': {
+              'hive_user': 'hive'
+            }
+          },
+        "yarn-site": {
+          "properties": {
+            "yarn.scheduler.minimum-allocation-mb": "682",
+            "yarn.nodemanager.resource.memory-mb": "24576",
+            "yarn.nodemanager.resource.cpu-vcores": '2'
+          }
+        },
+        "tez-interactive-site": {
+          "properties": {
+            "tez.am.resource.memory.mb": "341"
+          }
+        }
+      }
+    }
+
+
+
+
+
+
+    # Expected config outputs.
+
+    # Expected capacity-scheduler with 'llap' (size:20) and 'default' queue at root level.
+    expected_capacity_scheduler_llap_queue_size_20 = {
+      "properties": {
+        "capacity-scheduler": 'yarn.scheduler.capacity.root.default.maximum-capacity=80\n'
+                              'yarn.scheduler.capacity.root.accessible-node-labels=*\n'
+                              'yarn.scheduler.capacity.root.capacity=100\n'
+                              'yarn.scheduler.capacity.root.queues=default,llap\n'
+                              'yarn.scheduler.capacity.maximum-applications=10000\n'
+                              'yarn.scheduler.capacity.root.default.user-limit-factor=1\n'
+                              'yarn.scheduler.capacity.root.default.state=RUNNING\n'
+                              'yarn.scheduler.capacity.maximum-am-resource-percent=1\n'
+                              'yarn.scheduler.capacity.root.default.acl_submit_applications=*\n'
+                              'yarn.scheduler.capacity.root.default.capacity=80\n'
+                              'yarn.scheduler.capacity.root.acl_administer_queue=*\n'
+                              'yarn.scheduler.capacity.node-locality-delay=40\n'
+                              'yarn.scheduler.capacity.queue-mappings-override.enable=false\n'
+                              'yarn.scheduler.capacity.root.llap.user-limit-factor=1\n'
+                              'yarn.scheduler.capacity.root.llap.state=RUNNING\n'
+                              'yarn.scheduler.capacity.root.llap.ordering-policy=fifo\n'
+                              'yarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\n'
+                              'yarn.scheduler.capacity.root.llap.maximum-capacity=20\n'
+                              'yarn.scheduler.capacity.root.llap.capacity=20\n'
+                              'yarn.scheduler.capacity.root.llap.acl_submit_applications=hive\n'
+                              'yarn.scheduler.capacity.root.llap.acl_administer_queue=hive\n'
+                              'yarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'
+
+      }
+    }
+
+    # Expected capacity-scheduler with 'llap' (size:40) and 'default' queue at root level.
+    expected_capacity_scheduler_llap_queue_size_40 = {
+      "properties": {
+        "capacity-scheduler": 'yarn.scheduler.capacity.root.default.maximum-capacity=60\n'
+                              'yarn.scheduler.capacity.root.accessible-node-labels=*\n'
+                              'yarn.scheduler.capacity.root.capacity=100\n'
+                              'yarn.scheduler.capacity.root.queues=default,llap\n'
+                              'yarn.scheduler.capacity.maximum-applications=10000\n'
+                              'yarn.scheduler.capacity.root.default.user-limit-factor=1\n'
+                              'yarn.scheduler.capacity.root.default.state=RUNNING\n'
+                              'yarn.scheduler.capacity.maximum-am-resource-percent=1\n'
+                              'yarn.scheduler.capacity.root.default.acl_submit_applications=*\n'
+                              'yarn.scheduler.capacity.root.default.capacity=60\n'
+                              'yarn.scheduler.capacity.root.acl_administer_queue=*\n'
+                              'yarn.scheduler.capacity.node-locality-delay=40\n'
+                              'yarn.scheduler.capacity.queue-mappings-override.enable=false\n'
+                              'yarn.scheduler.capacity.root.llap.user-limit-factor=1\n'
+                              'yarn.scheduler.capacity.root.llap.state=RUNNING\n'
+                              'yarn.scheduler.capacity.root.llap.ordering-policy=fifo\n'
+                              'yarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\n'
+                              'yarn.scheduler.capacity.root.llap.maximum-capacity=40\n'
+                              'yarn.scheduler.capacity.root.llap.capacity=40\n'
+                              'yarn.scheduler.capacity.root.llap.acl_submit_applications=hive\n'
+                              'yarn.scheduler.capacity.root.llap.acl_administer_queue=hive\n'
+                              'yarn.scheduler.capacity.root.llap.maximum-am-resource-percent=1'
+
+      }
+    }
+
+    # Expected capacity-scheduler with 'llap' state = STOPPED, capacity = 0% and 'default' queue capacity = 100%.
+    expected_capacity_scheduler_llap_Stopped_size_0 = {
+      "properties": {
+        "capacity-scheduler": 'yarn.scheduler.capacity.root.default.maximum-capacity=100\n'
+                              'yarn.scheduler.capacity.root.accessible-node-labels=*\n'
+                              'yarn.scheduler.capacity.root.capacity=100\n'
+                              'yarn.scheduler.capacity.root.queues=default,llap\n'
+                              'yarn.scheduler.capacity.maximum-applications=10000\n'
+                              'yarn.scheduler.capacity.root.default.user-limit-factor=1\n'
+                              'yarn.scheduler.capacity.root.default.state=RUNNING\n'
+                              'yarn.scheduler.capacity.maximum-am-resource-percent=1\n'
+                              'yarn.scheduler.capacity.root.default.acl_submit_applications=*\n'
+                              'yarn.scheduler.capacity.root.default.capacity=100\n'
+                              'yarn.scheduler.capacity.root.acl_administer_queue=*\n'
+                              'yarn.scheduler.capacity.node-locality-delay=40\n'
+                              'yarn.scheduler.capacity.queue-mappings-override.enable=false\n'
+                              'yarn.scheduler.capacity.root.llap.user-limit-factor=1\n'
                               'yarn.scheduler.capacity.root.llap.state=STOPPED\n'
                               'yarn.scheduler.capacity.root.llap.ordering-policy=fifo\n'
                               'yarn.scheduler.capacity.root.llap.minimum-user-limit-percent=100\n'
@@ -1553,6 +2856,14 @@ class TestHDP25StackAdvisor(TestCase):
       }
     }
 
+    # Expected 'hive_interactive_env' when no modifications are done.
+    expected_hive_interactive_env_empty = {
+      "hive-interactive-env": {
+        "properties": {
+        }
+      }
+    }
+
     # Expected 'hive_interactive_env' with 'llap_queue_capacity' set to 20.
     expected_llap_queue_capacity_20 = '20'
 
@@ -1560,17 +2871,21 @@ class TestHDP25StackAdvisor(TestCase):
     expected_llap_queue_capacity_40 = '40'
 
 
+    # expected vals.
+    expected_visibility_false = {'visible': 'false'}
+    expected_visibility_true = {'visible': 'true'}
+
 
 
     #################### Tests #####################
 
 
+
     # Test 1 : (1). Only default queue exists in capacity-scheduler (2). enable_hive_interactive' is 'On' and
     # 'llap_queue_capacity is 0.
     configurations = {
     }
     self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services_1, hosts)
-
     # Check output
     self.assertEquals(configurations['hive-interactive-site']['properties']['hive.llap.daemon.queue.name'],
                       expected_hive_interactive_site_llap['hive-interactive-site']['properties']['hive.llap.daemon.queue.name'])
@@ -1888,6 +3203,222 @@ class TestHDP25StackAdvisor(TestCase):
     self.assertEquals(configurations, expected)
 
 
+    # Test 13: (1). 'default' and 'llap' (State : RUNNING) queue exists at root level in capacity-scheduler, and
+    #         (2). enable_hive_interactive' is 'on' and (3). configuration change detected for 'llap_queue_capacity'
+    #         Expected : Configuration values recommended for llap related configs.
+    configurations = {
+    }
+
+    # expected vals.
+    yarn_cont_mb_visibility = {'minimum': '682', 'maximum': '682'}
+    llap_io_mem_size_visibility = {'minimum': '0', 'maximum': '341'}
+    llap_num_executors_visibility = {'minimum': '1', 'maximum': '1'}
+    num_llap_nodes_visibility = {'minimum': '1', 'maximum': '1'}
+
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services_13, hosts)
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '682')
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.llap.daemon.yarn.container.mb'], yarn_cont_mb_visibility)
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '1')
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.llap.daemon.num.executors'], llap_num_executors_visibility)
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.memory.size'], '341')
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.llap.io.memory.size'], llap_io_mem_size_visibility)
+
+    self.assertEqual(configurations['hive-interactive-env']['properties']['num_llap_nodes'], '1')
+    self.assertEquals(configurations['hive-interactive-env']['property_attributes']['num_llap_nodes'], num_llap_nodes_visibility)
+
+    self.assertEqual(configurations['hive-interactive-env']['properties']['llap_heap_size'], '272')
+
+    self.assertEqual(configurations['hive-interactive-env']['properties']['slider_am_container_mb'], '341')
+    self.assertEquals(configurations['hive-interactive-env']['property_attributes']['llap_queue_capacity'], expected_visibility_true)
+
+
+
+    # Test 14: (1). 'default' and 'llap' (State : RUNNING) queue exists at root level in capacity-scheduler, and
+    #         (2). enable_hive_interactive' is 'on' and (3). configuration change detected for 'enable_hive_interactive'
+    #         Expected : Configuration values recommended for llap related configs.
+    configurations = {
+    }
+
+    # expected vals.
+    yarn_cont_mb_visibility = {'minimum': '682', 'maximum': '682'}
+    llap_io_mem_size_visibility = {'minimum': '0', 'maximum': '341'}
+    llap_num_executors_visibility = {'minimum': '1', 'maximum': '1'}
+    num_llap_nodes_visibility = {'minimum': '1', 'maximum': '1'}
+
+
+
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services_16, hosts)
+
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '682')
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.llap.daemon.yarn.container.mb'], yarn_cont_mb_visibility)
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '1')
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.llap.daemon.num.executors'], llap_num_executors_visibility)
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.memory.size'], '341')
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.llap.io.memory.size'], llap_io_mem_size_visibility)
+
+    self.assertEqual(configurations['hive-interactive-env']['properties']['num_llap_nodes'], '1')
+    self.assertEquals(configurations['hive-interactive-env']['property_attributes']['num_llap_nodes'], num_llap_nodes_visibility)
+
+    self.assertEqual(configurations['hive-interactive-env']['properties']['llap_heap_size'], '272')
+
+    self.assertEqual(configurations['hive-interactive-env']['properties']['slider_am_container_mb'], '341')
+    self.assertEquals(configurations['hive-interactive-env']['property_attributes']['llap_queue_capacity'], expected_visibility_true)
+
+
+
+    # Test 15: (1). 'default' and 'llap' (State : RUNNING) queue exists at root level in capacity-scheduler, and
+    #         (2). enable_hive_interactive' is 'OFF' and (3). configuration change detected for 'enable_hive_interactive'
+    #         Expected : capacity-scheduler sets 'llap' queue to STOPPED (capacity 0, 'default' at 100); no llap related env configs recommended.
+    configurations = {
+    }
+
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services_17, hosts)
+
+    cap_sched_output_dict = convertToDict(configurations['capacity-scheduler']['properties']['capacity-scheduler'])
+    cap_sched_expected_dict = convertToDict(expected_capacity_scheduler_llap_Stopped_size_0['properties']['capacity-scheduler'])
+    self.assertEqual(cap_sched_output_dict, cap_sched_expected_dict)
+
+    self.assertEquals(configurations['hive-interactive-site']['properties'],
+                      expected_hive_interactive_site_default['hive-interactive-site']['properties'])
+    self.assertTrue('hive-interactive-env' not in configurations)
+
+    self.assertTrue('property_attributes' not in configurations)
+
+
+    # Test 16: (1). 'default' and 'llap' (State : RUNNING) queue exists at root level in capacity-scheduler, and
+    #         (2). enable_hive_interactive' is 'OFF' and (3). configuration change NOT detected for 'enable_hive_interactive'
+    #         Expected : No changes.
+    configurations = {
+    }
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services_18, hosts)
+
+    self.assertEquals(configurations['capacity-scheduler']['properties'],
+                      expected_capacity_scheduler_empty['properties'])
+    self.assertEquals(configurations['hive-interactive-site']['properties'],
+                      expected_hive_interactive_site_empty['hive-interactive-site']['properties'])
+    self.assertTrue('hive-interactive-env' not in configurations)
+
+    self.assertTrue('property_attributes' not in configurations)
+
+
+    # Test 17: (1). 'default' and 'llap' (State : RUNNING) queue exists at root level in capacity-scheduler, and
+    #         (2). enable_hive_interactive' is 'on' and (3). configuration change detected for 'num_llap_nodes'
+    #         Expected : Configuration values recommended for llap related configs, with 'num_llap_nodes' kept as-is (not recalculated).
+    configurations = {
+    }
+
+    # expected vals.
+    yarn_cont_mb_visibility = {'minimum': '682', 'maximum': '682'}
+    llap_io_mem_size_visibility = {'minimum': '0', 'maximum': '341'}
+    llap_num_executors_visibility = {'minimum': '1', 'maximum': '1'}
+
+
+
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services_19, hosts)
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '682')
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.llap.daemon.yarn.container.mb'], yarn_cont_mb_visibility)
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '1')
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.llap.daemon.num.executors'], llap_num_executors_visibility)
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.memory.size'], '341')
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.llap.io.memory.size'], llap_io_mem_size_visibility)
+
+    self.assertTrue('num_llap_nodes' not in configurations['hive-interactive-env']['properties'])
+
+    self.assertEqual(configurations['hive-interactive-env']['properties']['llap_heap_size'], '272')
+
+    self.assertEqual(configurations['hive-interactive-env']['properties']['slider_am_container_mb'], '341')
+
+    self.assertEquals(configurations['hive-interactive-env']['property_attributes']['llap_queue_capacity'], expected_visibility_true)
+
+
+
+
+
+    # Test 18: (1). 'default' and 'llap' (State : RUNNING) queue exists at root level in capacity-scheduler, and
+    #         (2). enable_hive_interactive' is 'on' and (3). configuration change detected for 'llap_queue_capacity'
+    #         Expected : Configuration values recommended for llap related configs.
+    configurations = {
+    }
+
+    # expected vals.
+    yarn_cont_mb_visibility = {'minimum': '12958', 'maximum': '12958'}
+    llap_io_mem_size_visibility = {'minimum': '0', 'maximum': '12617'}
+    llap_num_executors_visibility = {'minimum': '1', 'maximum': '8'}
+    num_llap_nodes_visibility = {'minimum': '1', 'maximum': '1'}
+
+
+
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services_20, hosts)
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.yarn.container.mb'], '12958')
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.llap.daemon.yarn.container.mb'], yarn_cont_mb_visibility)
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.daemon.num.executors'], '2')
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.llap.daemon.num.executors'], llap_num_executors_visibility)
+
+    self.assertEqual(configurations['hive-interactive-site']['properties']['hive.llap.io.memory.size'], '12276')
+    self.assertEquals(configurations['hive-interactive-site']['property_attributes']['hive.llap.io.memory.size'], llap_io_mem_size_visibility)
+
+    self.assertEqual(configurations['hive-interactive-env']['properties']['num_llap_nodes'], '1')
+    self.assertEquals(configurations['hive-interactive-env']['property_attributes']['num_llap_nodes'], num_llap_nodes_visibility)
+
+    self.assertEqual(configurations['hive-interactive-env']['properties']['llap_heap_size'], '545')
+
+    self.assertEqual(configurations['hive-interactive-env']['properties']['slider_am_container_mb'], '682')
+
+    self.assertEquals(configurations['hive-interactive-env']['property_attributes']['llap_queue_capacity'], expected_visibility_true)
+
+
+
+
+    # Test 19: (1). 'default' and 'llap' (State : RUNNING) queue exists at root level in capacity-scheduler, and
+    #         (2). enable_hive_interactive' is 'on' and (3). More than 2 queues exist.
+    #         Expected : No config value recommendations; only the 'llap_queue_capacity' slider visibility is set (to 'true').
+    configurations = {
+    }
+
+
+
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services_14, hosts)
+
+    self.assertEquals(configurations['capacity-scheduler']['properties'],
+                      expected_capacity_scheduler_empty['properties'])
+    self.assertEquals(configurations['hive-interactive-site']['properties'],
+                      expected_hive_interactive_site_empty['hive-interactive-site']['properties'])
+    self.assertEquals(configurations['hive-interactive-env']['properties'],
+                      expected_hive_interactive_env_empty['hive-interactive-env']['properties'])
+    self.assertEquals(configurations['hive-interactive-env']['property_attributes']['llap_queue_capacity'], expected_visibility_true)
+
+
+
+    # Test 20: YARN service with : (1). 'capacity scheduler' having 'llap' (state:stopped) and 'default' queue at
+    # root level and (2). 'enable_hive_interactive' is ON and (3). 'hive.llap.daemon.queue.name' == 'default'
+    #         Expected : No config value recommendations; 'llap_queue_capacity' slider hidden (visibility set to 'false').
+    configurations = {
+    }
+
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services_15, hosts)
+    self.assertEquals(configurations['capacity-scheduler']['properties'],
+                      expected_capacity_scheduler_empty['properties'])
+
+    self.assertEquals(configurations['hive-interactive-site']['properties'],
+                      expected_hive_interactive_site_empty['hive-interactive-site']['properties'])
+
+    self.assertEquals(configurations['hive-interactive-env']['properties'],
+                      expected_hive_interactive_env_empty['hive-interactive-env']['properties'])
+
+    self.assertEquals(configurations['hive-interactive-env']['property_attributes']['llap_queue_capacity'], expected_visibility_false)
+
+
 """
 Helper method to convert string of key-values to dict.
 """

http://git-wip-us.apache.org/repos/asf/ambari/blob/29179e76/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json b/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json
index 1cb2a02..e7bb056 100644
--- a/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json
+++ b/ambari-server/src/test/python/stacks/2.5/configs/hsi_default.json
@@ -396,7 +396,9 @@
           "hive_server_interactive_host" : "c6401.ambari.apache.org",
           "llap_queue_capacity" : "0",
           "num_llap_nodes" : "1",
-          "num_retries_for_checking_llap_status" : 2
+          "num_retries_for_checking_llap_status" : 2,
+          "llap_heap_size":"280",
+          "llap_log_level":"INFO"
         },
         "hive-interactive-site": {
             "hive.enforce.sorting": "true",
@@ -435,7 +437,9 @@
             "hive.server2.authentication": "NOSASL",
             "hive.server2.transport.mode": "binary",
             "hive.optimize.mapjoin.mapreduce": "true",
-            "hive.exec.scratchdir" : "/custompath/tmp/hive"
+            "hive.exec.scratchdir" : "/custompath/tmp/hive",
+            "hive.llap.daemon.yarn.container.mb" : "341",
+            "hive.llap.io.memory.size" : "341"
         },
 		"ranger-hive-plugin-properties": {
             "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",


[2/2] ambari git commit: HiveServerInteractive. (1). AMBARI-16078. Config calculations used by LLAP app. (2). AMBARI-16086. Hide 'llap' queue capacity slider when 'llap' queue is not being used for LLAP app. (3). AMBARI-16087. LLAP queue is not selected

Posted by sw...@apache.org.
HiveServerInteractive. (1). AMBARI-16078. Config calculations used by LLAP app. (2). AMBARI-16086. Hide 'llap' queue capacity slider when 'llap' queue is not being used for LLAP app. (3). AMBARI-16087. LLAP queue is not selected when enabling Hive Interactive Query during Install Wizard. (4). AMBARI-16079. Wait a fixed number of times for a launched LLAP app in LAUNCHING state to go to RUNNING_ALL or RUNNING_PARTIAL.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/29179e76
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/29179e76
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/29179e76

Branch: refs/heads/trunk
Commit: 29179e766f957bd61feae767ec2c34655ef080b0
Parents: b9bbb6a
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Mon Apr 25 01:18:07 2016 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Mon Apr 25 16:41:53 2016 -0700

----------------------------------------------------------------------
 .../package/scripts/hive_server_interactive.py  |   21 +-
 .../0.12.0.2.0/package/scripts/params_linux.py  |   19 +-
 .../HIVE/configuration/hive-interactive-env.xml |   91 +
 .../configuration/hive-interactive-site.xml     |  103 +-
 .../HDP/2.5/services/HIVE/themes/theme.json     |    2 +-
 .../stacks/HDP/2.5/services/stack_advisor.py    |  556 +++++-
 .../stacks/2.5/HIVE/test_hive_server_int.py     |    6 +-
 .../stacks/2.5/common/test_stack_advisor.py     | 1681 +++++++++++++++++-
 .../python/stacks/2.5/configs/hsi_default.json  |    8 +-
 9 files changed, 2340 insertions(+), 147 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/29179e76/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
index b13189d..6bf4c0a 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
@@ -207,9 +207,10 @@ class HiveServerInteractiveDefault(HiveServerInteractive):
 
       unique_name = "llap-slider%s" % datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')
 
-      cmd = format("{stack_root}/current/hive-server2-hive2/bin/hive --service llap --instances 1 "
-                   "--slider-am-container-mb {slider_am_container_mb} --loglevel INFO --output {unique_name}")
-
+      cmd = format("{stack_root}/current/hive-server2-hive2/bin/hive --service llap --instances {params.num_llap_nodes}"
+                   " --slider-am-container-mb {params.slider_am_container_mb} --size {params.llap_daemon_container_size}m "
+                   " --cache {params.hive_llap_io_mem_size}m --xmx {params.llap_heap_size}m --loglevel {params.llap_log_level}"
+                   " --output {unique_name}")
       if params.security_enabled:
         cmd += format(" --slider-keytab-dir .slider/keytabs/{params.hive_user}/ --slider-keytab "
                       "{hive_llap_keytab_file} --slider-principal {hive_headless_keytab}")
@@ -299,8 +300,11 @@ class HiveServerInteractiveDefault(HiveServerInteractive):
     """
     Checks llap app status. The states can be : 'COMPLETE', 'APP_NOT_FOUND', 'RUNNING_PARTIAL', 'RUNNING_ALL' & 'LAUNCHING'.
 
-    If app is in 'APP_NOT_FOUND', 'RUNNING_PARTIAL' and 'LAUNCHING' state, we wait for 'num_times_to_wait' to have app
-    in (1). 'RUNNING_ALL' (2). 'LAUNCHING' or (3). 'RUNNING_PARTIAL' state with 80% or more 'desiredInstances' running.
+    If the app is in 'APP_NOT_FOUND', 'RUNNING_PARTIAL' or 'LAUNCHING' state:
+       we wait for 'num_times_to_wait' attempts for the app to reach (1). 'RUNNING_ALL' or (2). 'RUNNING_PARTIAL'
+       state with 80% or more 'desiredInstances' running, and return True.
+    else:
+       return False.
 
     Parameters: llap_app_name : deployed llap app name.
                 num_retries :   Number of retries to check the LLAP app status.
@@ -326,7 +330,7 @@ class HiveServerInteractiveDefault(HiveServerInteractive):
           Logger.error("Malformed JSON data received for LLAP app. Exiting ....")
           return False
 
-        if llap_app_info['state'].upper() in ['RUNNING_ALL', 'LAUNCHING']:
+        if llap_app_info['state'].upper() == 'RUNNING_ALL':
           Logger.info(
             "LLAP app '{0}' in '{1}' state.".format(llap_app_name, llap_app_info['state']))
           return True
@@ -361,8 +365,9 @@ class HiveServerInteractiveDefault(HiveServerInteractive):
                                                        llap_app_info['liveInstances'],
                                                        llap_app_info['desiredInstances'],
                                                        time.time() - curr_time))
-            raise Fail('App state is RUNNING_PARTIAL.')
-        elif llap_app_info['state'].upper() == 'APP_NOT_FOUND':
+            raise Fail("App state is RUNNING_PARTIAL. Live Instances : '{0}', Desired Instance : '{1}'".format(llap_app_info['liveInstances'],
+                                                                                                           llap_app_info['desiredInstances']))
+        elif llap_app_info['state'].upper() in ['APP_NOT_FOUND', 'LAUNCHING']:
           status_str = format("Slider app '{0}' current state is {1}.".format(llap_app_name, llap_app_info['state']))
           Logger.info(status_str)
           raise Fail(status_str)
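The revised docstring and state handling above amount to the following acceptance rule: 'RUNNING_ALL' is accepted
immediately, 'RUNNING_PARTIAL' is accepted only when at least 80% of 'desiredInstances' are live, and
'APP_NOT_FOUND' / 'LAUNCHING' raise a Fail so the check can be retried up to 'num_retries' times. A minimal
standalone sketch of that rule, assuming a status dict shaped like the 'llap_app_info' used in the hunk (the
function name 'llap_status_acceptable' and the 'min_fraction' parameter are illustrative, not the committed code):

    def llap_status_acceptable(llap_app_info, min_fraction=0.80):
      # Accept RUNNING_ALL outright; accept RUNNING_PARTIAL only when enough daemons are live.
      state = llap_app_info['state'].upper()
      if state == 'RUNNING_ALL':
        return True
      if state == 'RUNNING_PARTIAL':
        live = int(llap_app_info['liveInstances'])
        desired = int(llap_app_info['desiredInstances'])
        return desired > 0 and float(live) / desired >= min_fraction
      # COMPLETE, APP_NOT_FOUND and LAUNCHING are not acceptable end states here.
      return False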

http://git-wip-us.apache.org/repos/asf/ambari/blob/29179e76/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index a044fda..aa561f8 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -70,7 +70,7 @@ version = default("/commandParams/version", None)
 current_version = default("/hostLevelParams/current_version", None)
 
 # When downgrading the 'version' and 'current_version' are both pointing to the downgrade-target version
-# downgrade_from_version provides the source-version the downgrade is happening from 
+# downgrade_from_version provides the source-version the downgrade is happening from
 downgrade_from_version = default("/commandParams/downgrade_from_version", None)
 
 # Upgrade direction
@@ -320,8 +320,7 @@ start_metastore_script = 'startMetastore.sh'
 hive_metastore_pid = status_params.hive_metastore_pid
 
 # Hive Server Interactive
-# TODO , not being passed right now
-slider_am_container_mb = default("/configurations/hive-site-interactive/slider-am-container-mb", 341)
+slider_am_container_mb = default("/configurations/hive-interactive-env/slider_am_container_mb", 341)
 
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 yarn_user = config['configurations']['yarn-env']['yarn_user']
@@ -340,7 +339,7 @@ start_metastore_path = format("{tmp_dir}/start_metastore_script")
 hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
 
 if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
-  if stack_version_formatted_major and check_stack_feature(StackFeature.HIVE_ENV_HEAPSIZE, stack_version_formatted_major): 
+  if stack_version_formatted_major and check_stack_feature(StackFeature.HIVE_ENV_HEAPSIZE, stack_version_formatted_major):
     hive_heapsize = config['configurations']['hive-env']['hive.heapsize']
   else:
     hive_heapsize = config['configurations']['hive-site']['hive.heapsize']
@@ -418,7 +417,7 @@ if OSCheck.is_ubuntu_family():
   mysql_configname = '/etc/mysql/my.cnf'
 else:
   mysql_configname = '/etc/my.cnf'
-  
+
 mysql_user = 'mysql'
 
 # Hive security
@@ -516,6 +515,12 @@ if has_hive_interactive:
   tez_interactive_config_dir = "/etc/tez_hive2/conf"
   tez_interactive_user = config['configurations']['tez-env']['tez_user']
   num_retries_for_checking_llap_status = default('/configurations/hive-interactive-env/num_retries_for_checking_llap_status', 10)
+  # Used in LLAP slider package creation
+  num_llap_nodes = config['configurations']['hive-interactive-env']['num_llap_nodes']
+  llap_daemon_container_size = config['configurations']['hive-interactive-site']['hive.llap.daemon.yarn.container.mb']
+  llap_log_level = config['configurations']['hive-interactive-env']['llap_log_level']
+  hive_llap_io_mem_size = config['configurations']['hive-interactive-site']['hive.llap.io.memory.size']
+  llap_heap_size = config['configurations']['hive-interactive-env']['llap_heap_size']
   if security_enabled:
     hive_llap_keytab_file = config['configurations']['hive-interactive-site']['hive.llap.zk.sm.keytab.file']
     hive_headless_keytab = config['configurations']['hive-interactive-site']['hive.llap.zk.sm.principal']
@@ -576,7 +581,7 @@ if has_ranger_admin:
       ranger_jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
       audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
       jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
-  
+
   ranger_downloaded_custom_connector = format("{tmp_dir}/{ranger_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
   ranger_driver_curl_source = format("{jdk_location}/{ranger_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
   ranger_driver_curl_target = format("{hive_lib}/{ranger_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
@@ -589,7 +594,7 @@ if has_ranger_admin:
     'jdbc.url': format("{hive_url}/default;principal={hive_principal}") if security_enabled else hive_url,
     'commonNameForCertificate': common_name_for_certificate
   }
-  
+
   hive_ranger_plugin_repo = {
     'isActive': 'true',
     'config': json.dumps(hive_ranger_plugin_config),
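With these additions, every placeholder in the LLAP slider command built in hive_server_interactive.py (see the
hunk above) now resolves from configuration rather than hard-coded values. As an illustration, with the values
carried by hsi_default.json in this commit (num_llap_nodes=1, hive.llap.daemon.yarn.container.mb=341,
hive.llap.io.memory.size=341, llap_heap_size=280, llap_log_level=INFO) and the default slider_am_container_mb of
341, the command would render roughly as:

    {stack_root}/current/hive-server2-hive2/bin/hive --service llap --instances 1 \
      --slider-am-container-mb 341 --size 341m --cache 341m --xmx 280m --loglevel INFO \
      --output llap-slider<utc-timestamp>

The '--output' name is generated from the current UTC time; on security-enabled clusters the slider keytab and
principal options shown in the hunk are appended as well.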

http://git-wip-us.apache.org/repos/asf/ambari/blob/29179e76/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hive-interactive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hive-interactive-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hive-interactive-env.xml
index 816e207..aad9c47 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hive-interactive-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hive-interactive-env.xml
@@ -67,6 +67,18 @@
         <type>hive-interactive-env</type>
         <name>enable_hive_interactive</name>
       </property>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>num_llap_nodes</name>
+      </property>
+      <property>
+        <type>hive-interactive-site</type>
+        <name>hive.llap.daemon.queue.name</name>
+      </property>
+      <property>
+        <type>capacity-scheduler</type>
+        <name>yarn.scheduler.capacity.root.queues</name>
+      </property>
     </depends-on>
   </property>
   <property>
@@ -80,6 +92,24 @@
       <maximum>8</maximum>
       <increment-step>1</increment-step>
     </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>enable_hive_interactive</name>
+      </property>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>llap_queue_capacity</name>
+      </property>
+      <property>
+        <type>hive-interactive-site</type>
+        <name>hive.llap.daemon.queue.name</name>
+      </property>
+      <property>
+        <type>capacity-scheduler</type>
+        <name>yarn.scheduler.capacity.root.queues</name>
+      </property>
+    </depends-on>
   </property>
   <property>
     <name>num_retries_for_checking_llap_status</name>
@@ -93,6 +123,67 @@
       <increment-step>1</increment-step>
     </value-attributes>
   </property>
+  <property>
+    <name>llap_heap_size</name>
+    <value>0</value>
+    <description>Heap Size used by LLAP app.</description>
+    <display-name>LLAP heap size</display-name>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>enable_hive_interactive</name>
+      </property>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>llap_queue_capacity</name>
+      </property>
+      <property>
+        <type>hive-interactive-site</type>
+        <name>hive.llap.daemon.queue.name</name>
+      </property>
+      <property>
+        <type>capacity-scheduler</type>
+        <name>yarn.scheduler.capacity.root.queues</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>slider_am_container_mb</name>
+    <value>341</value>
+    <description>Slider's app master container size in MB.</description>
+    <display-name>Slider AM container size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>enable_hive_interactive</name>
+      </property>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>llap_queue_capacity</name>
+      </property>
+      <property>
+        <type>hive-interactive-site</type>
+        <name>hive.llap.daemon.queue.name</name>
+      </property>
+      <property>
+        <type>capacity-scheduler</type>
+        <name>yarn.scheduler.capacity.root.queues</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>llap_log_level</name>
+    <value>INFO</value>
+    <description>LLAP app logging level</description>
+    <display-name>LLAP app logging level</display-name>
+  </property>
 
   <!-- hive-env.sh -->
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/29179e76/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hive-interactive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hive-interactive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hive-interactive-site.xml
index 21d1109..bd65435 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hive-interactive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/configuration/hive-interactive-site.xml
@@ -51,6 +51,28 @@ limitations under the License.
       <maximum>10</maximum>
       <increment-step>1</increment-step>
     </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>enable_hive_interactive</name>
+      </property>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>llap_queue_capacity</name>
+      </property>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>num_llap_nodes</name>
+      </property>
+      <property>
+        <type>hive-interactive-site</type>
+        <name>hive.llap.daemon.queue.name</name>
+      </property>
+      <property>
+        <type>capacity-scheduler</type>
+        <name>yarn.scheduler.capacity.root.queues</name>
+      </property>
+    </depends-on>
   </property>
 
   <property>
@@ -172,21 +194,6 @@ limitations under the License.
   </property>
 
   <property>
-    <name>hive.server2.tez.sessions.per.default.queue</name>
-    <value>1</value>
-    <description>
-      The maximum number of queries this Hive LLAP cluster will be able to handle concurrently.
-    </description>
-    <display-name>Maximum Concurrent Queries</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>1</minimum>
-      <maximum>10</maximum>
-      <increment-step>1</increment-step>
-    </value-attributes>
-  </property>
-
-  <property>
     <name>hive.server2.tez.default.queues</name>
     <display-name>Default query queues</display-name>
     <value>default</value>
@@ -378,6 +385,28 @@ limitations under the License.
       <increment-step>256</increment-step>
       <overridable>false</overridable>
     </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>enable_hive_interactive</name>
+      </property>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>llap_queue_capacity</name>
+      </property>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>num_llap_nodes</name>
+      </property>
+      <property>
+        <type>hive-interactive-site</type>
+        <name>hive.llap.daemon.queue.name</name>
+      </property>
+      <property>
+        <type>capacity-scheduler</type>
+        <name>yarn.scheduler.capacity.root.queues</name>
+      </property>
+    </depends-on>
   </property>
 
   <property>
@@ -391,6 +420,28 @@ limitations under the License.
       <maximum>8</maximum>
       <increment-step>1</increment-step>
     </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>enable_hive_interactive</name>
+      </property>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>llap_queue_capacity</name>
+      </property>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>num_llap_nodes</name>
+      </property>
+      <property>
+        <type>hive-interactive-site</type>
+        <name>hive.llap.daemon.queue.name</name>
+      </property>
+      <property>
+        <type>capacity-scheduler</type>
+        <name>yarn.scheduler.capacity.root.queues</name>
+      </property>
+    </depends-on>
   </property>
 
   <property>
@@ -416,6 +467,28 @@ limitations under the License.
       <increment-step>256</increment-step>
       <overridable>false</overridable>
     </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>enable_hive_interactive</name>
+      </property>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>llap_queue_capacity</name>
+      </property>
+      <property>
+        <type>hive-interactive-env</type>
+        <name>num_llap_nodes</name>
+      </property>
+      <property>
+        <type>hive-interactive-site</type>
+        <name>hive.llap.daemon.queue.name</name>
+      </property>
+      <property>
+        <type>capacity-scheduler</type>
+        <name>yarn.scheduler.capacity.root.queues</name>
+      </property>
+    </depends-on>
   </property>
 
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/29179e76/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/themes/theme.json b/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/themes/theme.json
index 4e1a54b..818b970 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/themes/theme.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/HIVE/themes/theme.json
@@ -70,7 +70,7 @@
               "configs":[
                 "hive-interactive-env/enable_hive_interactive"
               ],
-              "if": "${hive-interactive-env/enable_hive_interactive}",
+              "if": "${hive-interactive-env/enable_hive_interactive} && ${hive-interactive-site/hive.llap.daemon.queue.name} === llap",
               "then": {
                 "property_value_attributes": {
                   "visible": true

http://git-wip-us.apache.org/repos/asf/ambari/blob/29179e76/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index 7f63d0c..ae6cede 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -17,13 +17,18 @@ See the License for the specific language governing permissions and
 limitations under the License.
 """
 
+import math
+import traceback
+
 from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
 
 class HDP25StackAdvisor(HDP24StackAdvisor):
 
   def __init__(self):
     super(HDP25StackAdvisor, self).__init__()
     Logger.initialize_logger()
+    self.HIVE_INTERACTIVE_SITE = 'hive-interactive-site'
 
   def createComponentLayoutRecommendations(self, services, hosts):
     parentComponentLayoutRecommendations = super(HDP25StackAdvisor, self).createComponentLayoutRecommendations(
@@ -143,15 +148,15 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
       putHiveInteractiveEnvProperty('enable_hive_interactive', 'true')
       putHiveInteractiveEnvProperty('hive_server_interactive_host', hsi_host)
 
-      if 'hive.llap.zk.sm.connectionString' in services['configurations']['hive-interactive-site']['properties']:
+      if 'hive.llap.zk.sm.connectionString' in services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties']:
         # Fill the property 'hive.llap.zk.sm.connectionString' required by Hive Server Interactive (HiveServer2)
         zookeeper_host_port = self.getZKHostPortString(services)
         if zookeeper_host_port:
-          putHiveInteractiveSiteProperty = self.putProperty(configurations, "hive-interactive-site", services)
+          putHiveInteractiveSiteProperty = self.putProperty(configurations, self.HIVE_INTERACTIVE_SITE, services)
           putHiveInteractiveSiteProperty("hive.llap.zk.sm.connectionString", zookeeper_host_port)
 
       # Update 'hive.llap.daemon.queue.name' if capacity scheduler is changed.
-      if 'hive.llap.daemon.queue.name' in services['configurations']['hive-interactive-site']['properties']:
+      if 'hive.llap.daemon.queue.name' in services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties']:
         self.setLlapDaemonQueueName(services, configurations)
     else:
       putHiveInteractiveEnvProperty('enable_hive_interactive', 'false')
@@ -163,18 +168,493 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     super(HDP25StackAdvisor, self).recommendYARNConfigurations(configurations, clusterData, services, hosts)
 
     # Queue 'llap' creation/removal logic (Used by Hive Interactive server and associated LLAP)
-    hsi_hosts = self.__getHostsForComponent(services, "HIVE", "HIVE_SERVER_INTERACTIVE")
-    if len(hsi_hosts) > 0:
-      if 'hive-interactive-env' in services['configurations'] and \
-          'enable_hive_interactive' in services['configurations']['hive-interactive-env']['properties']:
-        enable_hive_interactive = services['configurations']['hive-interactive-env']['properties']['enable_hive_interactive']
-        llap_queue_name = 'llap'
+    if 'hive-interactive-env' in services['configurations'] and \
+        'enable_hive_interactive' in services['configurations']['hive-interactive-env']['properties']:
+      enable_hive_interactive = services['configurations']['hive-interactive-env']['properties']['enable_hive_interactive']
+      LLAP_QUEUE_NAME = 'llap'
+
+      # Hive Server interactive is already added or getting added
+      if enable_hive_interactive == 'true':
+        self.checkAndManageLlapQueue(services, configurations, LLAP_QUEUE_NAME)
+        self.updateLlapConfigs(configurations, services, hosts, LLAP_QUEUE_NAME)
+      else:  # When Hive Interactive Server is in 'off/removed' state.
+        self.checkAndStopLlapQueue(services, configurations, LLAP_QUEUE_NAME)
+
+  """
+  Entry point for updating Hive's 'LLAP app' configs namely : (1). num_llap_nodes (2). hive.llap.daemon.yarn.container.mb
+    (3). hive.llap.daemon.num.executors (4). hive.llap.io.memory.size (5). llap_heap_size (6). slider_am_container_mb.
+
+    The trigger point for updating LLAP configs (mentioned above) is change in values of :
+    'llap_queue_capacity' or 'num_llap_nodes' or 'llap' named queue get selected for config 'hive.llap.daemon.queue.name'.
+
+    'llap_queue_capacity', 'hive.llap.daemon.queue.name' : Change detection for this property triggers change for all
+     the configs mentioned.
+
+    'num_llap_nodes' : If there is a change in value for 'num_llap_nodes', it's value is not updated/calulated,
+     but other dependent configs get calculated.
+  """
+  def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name):
+    putHiveInteractiveSiteProperty = self.putProperty(configurations, self.HIVE_INTERACTIVE_SITE, services)
+    putHiveInteractiveSitePropertyAttribute = self.putPropertyAttribute(configurations, self.HIVE_INTERACTIVE_SITE)
+
+    putHiveInteractiveEnvProperty = self.putProperty(configurations, "hive-interactive-env", services)
+    putHiveInteractiveEnvPropertyAttribute = self.putPropertyAttribute(configurations, "hive-interactive-env")
+
+    num_llap_nodes_in_changed_configs = False
+    llap_daemon_selected_queue_name = None
+    llap_queue_selected_in_current_call = None
+
+    try:
+      if self.HIVE_INTERACTIVE_SITE in services['configurations'] and \
+          'hive.llap.daemon.queue.name' in services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties']:
+        llap_daemon_selected_queue_name =  services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties']['hive.llap.daemon.queue.name']
+
+      if 'hive.llap.daemon.queue.name' in configurations[self.HIVE_INTERACTIVE_SITE]['properties']:
+        llap_queue_selected_in_current_call = configurations[self.HIVE_INTERACTIVE_SITE]['properties']['hive.llap.daemon.queue.name']
+
+      # Update Visibility of LLAP configs.
+      capacitySchedulerProperties = self.getCapacitySchedulerProperties(services)
+      if capacitySchedulerProperties:
+        # Get all leaf queues.
+        leafQueueNames = self.getAllYarnLeafQueues(capacitySchedulerProperties)
+        if (llap_daemon_selected_queue_name != None and llap_daemon_selected_queue_name == llap_queue_name) or \
+          (llap_queue_selected_in_current_call != None and llap_queue_selected_in_current_call == llap_queue_name):
+            putHiveInteractiveEnvPropertyAttribute("llap_queue_capacity", "visible", "true")
+            Logger.debug("Selected YARN queue is '{0}'. Setting LLAP queue capacity slider visibility to True".format(llap_queue_name))
+        else:
+          putHiveInteractiveEnvPropertyAttribute("llap_queue_capacity", "visible", "false")
+          Logger.debug("Queue selected for LLAP app is : '{0}'. Current YARN queues : {1}. "
+                    "Setting LLAP queue capacity slider visibility to False. "
+                    "Skipping updating values for LLAP related configs".format(llap_daemon_selected_queue_name, list(leafQueueNames)))
+          return
+      else:
+        Logger.error("Couldn't retrieve 'capacity-scheduler' properties while doing YARN queue adjustment for Hive Server Interactive."
+                     " Not calculating LLAP configs.")
+        return
+
+      # Values are not calculated unless there are exactly 2 leaf queues (the 'llap' queue selection is verified above).
+      if len(leafQueueNames) !=2:
+        return
+
+      # 'llap' queue exists at this point.
+      if 'changed-configurations' in services.keys():
+        llap_queue_prop_old_val = None
 
-        # Hive Server interactive is already added or getting added
-        if enable_hive_interactive == 'true':
-          self.checkAndManageLlapQueue(services, configurations, llap_queue_name)
-        else:  # When Hive Interactive Server is in 'off/removed' state.
-          self.checkAndStopLlapQueue(services, configurations, llap_queue_name)
+        # Calculations are triggered only if there is a change in any of the following props :
+        # 'llap_queue_capacity', 'enable_hive_interactive', 'num_llap_nodes', or 'hive.llap.daemon.queue.name' (with 'llap' selected).
+        config_names_to_be_checked = set(['llap_queue_capacity', 'enable_hive_interactive', 'num_llap_nodes'])
+        changed_configs_in_hive_int_env = self.are_config_props_in_changed_configs(services, "hive-interactive-env",
+                                                                                   config_names_to_be_checked, False)
+
+        llap_queue_prop_in_changed_configs = self.are_config_props_in_changed_configs(services, self.HIVE_INTERACTIVE_SITE,
+                                                                                      set(['hive.llap.daemon.queue.name']), False)
+
+        if not changed_configs_in_hive_int_env and not llap_queue_prop_in_changed_configs:
+          Logger.info("LLAP parameters not modified. Not adjusting LLAP configs. "
+                       "Current changed-configuration received is : {0}".format(services["changed-configurations"]))
+          return
+
+        node_manager_hosts = self.get_node_manager_hosts(services, hosts)
+        node_manager_host_cnt = len(node_manager_hosts)
+
+        # If 'changed-configurations' contains the 'num_llap_nodes' prop, we don't calculate it and use the same value.
+        num_llap_nodes_in_changed_configs = self.are_config_props_in_changed_configs(services, "hive-interactive-env",
+                                                                                     set(['num_llap_nodes']), False)
+
+        # Get value for prop 'num_llap_nodes'.
+        if not num_llap_nodes_in_changed_configs:
+          num_llap_nodes, num_llap_nodes_max_limit = self.calculate_num_llap_nodes(services, hosts, configurations)
+        else:
+          num_llap_nodes = self.get_num_llap_nodes(services)
+          num_llap_nodes_max_limit = node_manager_host_cnt
+
+        # Get calculated value for prop 'hive.llap.daemon.yarn.container.mb'
+        llap_container_size, llap_container_size_max_limit = self.calculate_llap_app_container_size(services, hosts, configurations)
+
+        # Get calculated value for prop 'hive.llap.daemon.num.executors'
+        num_executors_per_node, num_executors_per_node_max_limit = self.calculate_llap_daemon_executors_count(services,
+                                                                                                              llap_container_size)
+        assert (num_executors_per_node >= 0), "'Number of executors per node' : {0}. Expected value : >= 0".format(
+          num_executors_per_node)
+
+        # Get calculated value for prop 'hive.llap.io.memory.size'
+        cache_size_per_node, cache_size_per_node_max_limit = self.calculate_llap_cache_size_per_executor(services,
+                                                                                                 llap_container_size,
+                                                                                                 num_executors_per_node)
+        assert (cache_size_per_node >= 0), "'Cache size per node' : {0}. Expected value : >= 0".format(
+          cache_size_per_node)
+
+        # Get calculated value for prop 'llap_heap_size'
+        llap_xmx = self.calculate_llap_app_heap_size(services, num_executors_per_node)
+
+        # Get calculated Slider AM container Size
+        yarn_min_container_size = self.get_yarn_min_container_size(services)
+        slider_am_container_mb = self.calculate_slider_am_size(yarn_min_container_size)
+
+        num_llap_nodes = long(num_llap_nodes)
+        num_llap_nodes_max_limit = long(num_llap_nodes_max_limit)
+        llap_container_size = long(llap_container_size)
+        llap_container_size_max_limit = long(llap_container_size_max_limit)
+        num_executors_per_node = long(num_executors_per_node)
+        num_executors_per_node_max_limit = long(num_executors_per_node_max_limit)
+        cache_size_per_node = long(cache_size_per_node)
+        cache_size_per_node_max_limit = long(cache_size_per_node_max_limit)
+        llap_xmx = long(llap_xmx)
+        slider_am_container_mb = long(slider_am_container_mb)
+
+        Logger.info("Setting following value for 'hive.llap.daemon.yarn.container.mb'. Min : {0}, Curr: {1}, Max: {2}"\
+                    .format(llap_container_size, llap_container_size, llap_container_size_max_limit))
+        Logger.info("Setting following value for 'hive.llap.daemon.num.executors'. Min : {0}, Curr: {1}, Max: {2}"\
+                    .format('1', num_executors_per_node, num_executors_per_node_max_limit))
+        Logger.info("Setting following value for 'hive.llap.io.memory.size'. Min : {0}, Curr: {1}, Max: {2}"\
+                    .format('0', cache_size_per_node, cache_size_per_node_max_limit))
+        Logger.info("Setting following value for 'llap_heap_size'. Curr: {0}".format(llap_xmx))
+        Logger.info("Setting following value for 'slider_am_container_mb'. Curr: {0}".format(slider_am_container_mb))
+
+        if not num_llap_nodes_in_changed_configs:
+          Logger.info("Setting following value for 'num_llap_nodes'. Min : {0}, Curr: {1}, Max: {2}" \
+                      .format('1', num_llap_nodes, num_llap_nodes_max_limit))
+          putHiveInteractiveEnvProperty('num_llap_nodes', num_llap_nodes)
+          putHiveInteractiveEnvPropertyAttribute('num_llap_nodes', "minimum", 1)
+          putHiveInteractiveEnvPropertyAttribute('num_llap_nodes', "maximum", int(num_llap_nodes_max_limit))
+
+        putHiveInteractiveSiteProperty('hive.llap.daemon.yarn.container.mb', llap_container_size)
+        putHiveInteractiveSitePropertyAttribute('hive.llap.daemon.yarn.container.mb', "minimum",
+                                                llap_container_size)
+        putHiveInteractiveSitePropertyAttribute('hive.llap.daemon.yarn.container.mb', "maximum",
+                                                llap_container_size_max_limit)
+
+        putHiveInteractiveSiteProperty('hive.llap.daemon.num.executors', num_executors_per_node)
+        putHiveInteractiveSitePropertyAttribute('hive.llap.daemon.num.executors', "minimum", 1)
+        putHiveInteractiveSitePropertyAttribute('hive.llap.daemon.num.executors', "maximum",
+                                                num_executors_per_node_max_limit)
+
+        putHiveInteractiveSiteProperty('hive.llap.io.memory.size', cache_size_per_node)
+        putHiveInteractiveSitePropertyAttribute('hive.llap.io.memory.size', "minimum", 0)  # 0 -> Disables caching.
+        putHiveInteractiveSitePropertyAttribute('hive.llap.io.memory.size', "maximum",
+                                                cache_size_per_node_max_limit)
+
+        putHiveInteractiveEnvProperty('llap_heap_size', llap_xmx)
+
+        putHiveInteractiveEnvProperty('slider_am_container_mb', slider_am_container_mb)
+
+
+    except Exception as e:
+      Logger.info(e.message+" Skipping calculating LLAP configs.")
+      Logger.info(traceback.print_exc())
+
+  """
+  Checks whether any/all of the passed-in configuration properties of a given config type have been changed.
+  Reads from services["changed-configurations"].
+  Parameters:
+     services: Configuration information for the cluster
+     config_type : Type of the configuration
+     config_names_set : Set of configuration properties to be checked for changes.
+     all_exists: If True : returns True only if all properties mentioned in 'config_names_set' were found
+                           in services["changed-configurations"].
+                           Otherwise, returns False.
+                 If False : returns True if any of the properties mentioned in 'config_names_set' were found in
+                           services["changed-configurations"].
+                           Otherwise, returns False.
+  """
+  def are_config_props_in_changed_configs(self, services, config_type, config_names_set, all_exists):
+    changedConfigs = services["changed-configurations"]
+    changed_config_names_set = set()
+    for changedConfig in changedConfigs:
+      if changedConfig['type'] == config_type:
+        changed_config_names_set.update([changedConfig['name']])
+
+    if not changed_config_names_set:
+      return False
+    else:
+      configs_intersection = changed_config_names_set.intersection(config_names_set)
+      if all_exists:
+        if configs_intersection == config_names_set:
+          return True
+      else:
+        if len(configs_intersection) > 0 :
+          return True
+    return False
+
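
For reference while reading the hunk above, a self-contained sketch of the changed-configuration check; the payload shape mirrors services["changed-configurations"], but the concrete names and values are illustrative only:

# Sketch: detect whether any/all of a set of props changed for a given config type.
def props_changed(services, config_type, config_names, all_exists=False):
    changed = set(c['name'] for c in services.get("changed-configurations", [])
                  if c.get('type') == config_type)
    hits = changed & set(config_names)
    return hits == set(config_names) if all_exists else bool(hits)

sample = {"changed-configurations": [
    {"type": "hive-interactive-env", "name": "llap_queue_capacity", "old_value": "20"}]}
print(props_changed(sample, "hive-interactive-env",
                    ["llap_queue_capacity", "num_llap_nodes"]))  # True: one of the two changed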
+  """
+  Returns all the NodeManager hosts in the cluster.
+  """
+  def get_node_manager_hosts(self, services, hosts):
+    if len(hosts["items"]) > 0:
+      node_manager_hosts = self.getHostsWithComponent("YARN", "NODEMANAGER", services, hosts)
+      assert (node_manager_hosts is not None), "Information about NODEMANAGER not found in cluster."
+      return node_manager_hosts
+
+
+  """
+  Returns the current LLAP queue capacity percentage value. (llap_queue_capacity)
+  """
+  def get_llap_cap_percent_slider(self, services, configurations):
+    if 'llap_queue_capacity' in services['configurations']['hive-interactive-env']['properties']:
+      llap_slider_cap_percentage = float(
+        services['configurations']['hive-interactive-env']['properties']['llap_queue_capacity'])
+      if llap_slider_cap_percentage <= 0 :
+        if 'hive-interactive-env' in configurations and \
+            'llap_queue_capacity' in configurations["hive-interactive-env"]["properties"]:
+          llap_slider_cap_percentage = float(configurations["hive-interactive-env"]["properties"]["llap_queue_capacity"])
+      assert (llap_slider_cap_percentage > 0), "'llap_queue_capacity' is set to 0."
+      return llap_slider_cap_percentage
+
+
+  """
+  Returns current value of number of LLAP nodes in cluster (num_llap_nodes)
+  """
+  def get_num_llap_nodes(self, services):
+    if 'num_llap_nodes' in services['configurations']['hive-interactive-env']['properties']:
+      num_llap_nodes = float(
+        services['configurations']['hive-interactive-env']['properties']['num_llap_nodes'])
+      assert (num_llap_nodes > 0), "Number of LLAP nodes read : {0}. Expected value : > 0".format(
+        num_llap_nodes)
+      return num_llap_nodes
+    else:
+      raise Fail("Couldn't retrieve Hive Server interactive's 'num_llap_nodes' config.")
+
+  """
+  Calculates recommended and maximum LLAP nodes in the cluster.
+  """
+  def calculate_num_llap_nodes(self, services, hosts, configurations):
+    # TODO : Read NodeManager configs and figure out the smallest sized NM.
+    size_of_smallest_nm = self.get_yarn_rm_mem_in_mb(services)
+    assert (
+      size_of_smallest_nm > 0), "Size of smallest NODEMANAGER calculated value : {0}. Expected value : > 0".format(
+      size_of_smallest_nm)
+    yarn_min_container_size = self.get_yarn_min_container_size(services)
+    node_size_usable = self._normalizeDown(size_of_smallest_nm, yarn_min_container_size)
+    cap_available_for_daemons = self.calculate_cap_available_for_llap_daemons(services, hosts, configurations)
+    num_llap_nodes = float(math.ceil(cap_available_for_daemons / node_size_usable))
+    assert (num_llap_nodes > 0), "Number of LLAP nodes calculated : {0}. Expected value : > 0".format(
+      num_llap_nodes)
+    # Maximum number of nodes that LLAP can use.
+    num_llap_nodes_max_limit = len(self.get_node_manager_hosts(services, hosts))
+    Logger.info("Calculated num_llap_nodes {3}, num_llap_nodes_max_limit : {4}, using following : "
+                "yarn_min_container_size : {0}, node_size_usable : {1}, cap_available_for_daemons :"
+                " {2}. ".format(yarn_min_container_size, node_size_usable, \
+                cap_available_for_daemons, num_llap_nodes, num_llap_nodes_max_limit))
+    return num_llap_nodes, num_llap_nodes_max_limit
+
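
A rough worked example of the node-count math above (all figures are made-up cluster values, not taken from the patch or any real cluster):

import math

def normalize_down(val, granularity):
    # Round down to the nearest multiple of the YARN minimum container size.
    return math.floor(val / granularity) * granularity

yarn_min_container_mb = 1024.0   # assumed yarn.scheduler.minimum-allocation-mb
nm_mem_mb = 10240.0              # assumed yarn.nodemanager.resource.memory-mb
cap_for_daemons_mb = 18432.0     # assumed capacity left over for LLAP daemons

node_size_usable = normalize_down(nm_mem_mb, yarn_min_container_mb)  # 10240.0
num_llap_nodes = math.ceil(cap_for_daemons_mb / node_size_usable)    # 2.0 nodes
print(node_size_usable, num_llap_nodes)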
+
+  """
+  Gets Tez container size (hive.tez.container.size)
+  """
+  def get_tez_container_size(self, services):
+    hive_container_size = 0
+    if 'hive.tez.container.size' in services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties']:
+      hive_container_size = float(
+        services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties']['hive.tez.container.size'])
+      assert (
+        hive_container_size > 0), "'hive.tez.container.size' current value : {0}. Expected value : > 0".format(
+        hive_container_size)
+    else:
+      raise Fail("Couldn't retrieve Hive Server interactive's 'hive.tez.container.size' config.")
+    return hive_container_size
+
+
+
+  """
+  Gets YARN's minimum container size (yarn.scheduler.minimum-allocation-mb)
+  """
+  def get_yarn_min_container_size(self, services):
+    yarn_min_container_size = 0
+    if 'yarn.scheduler.minimum-allocation-mb' in services['configurations']['yarn-site']['properties']:
+      yarn_min_container_size = float(
+        services['configurations']['yarn-site']['properties']['yarn.scheduler.minimum-allocation-mb'])
+      assert (
+        yarn_min_container_size > 0), "'yarn.scheduler.minimum-allocation-mb' current value : {0}. Expected value : > 0".format(
+        yarn_min_container_size)
+    else:
+      raise Fail("Couldn't retrieve YARN's 'yarn.scheduler.minimum-allocation-mb' config.")
+    return yarn_min_container_size
+
+  """
+  Calculates recommended and maximum container size for LLAP app.
+  """
+  def calculate_llap_app_container_size(self, services, hosts, configurations):
+    cap_available_for_daemons = self.calculate_cap_available_for_llap_daemons(services, hosts, configurations)
+
+    node_manager_hosts = self.get_node_manager_hosts(services, hosts)
+    node_manager_host_cnt = len(node_manager_hosts)
+
+    num_llap_nodes_in_changed_configs = self.are_config_props_in_changed_configs(services, "hive-interactive-env", set(['num_llap_nodes']), False)
+    if not num_llap_nodes_in_changed_configs:
+      num_llap_nodes, num_llap_nodes_max_limit = self.calculate_num_llap_nodes(services, hosts, configurations)
+    else:
+      num_llap_nodes = self.get_num_llap_nodes(services)
+
+    llap_container_size_raw = cap_available_for_daemons / num_llap_nodes
+    llap_container_size_raw_max_limit = cap_available_for_daemons / node_manager_host_cnt
+
+    yarn_min_container_size = self.get_yarn_min_container_size(services)
+
+    llap_container_size = self._normalizeDown(llap_container_size_raw, yarn_min_container_size)
+    llap_container_size_max_limit = self._normalizeDown(llap_container_size_raw_max_limit, yarn_min_container_size)
+    if llap_container_size_max_limit < llap_container_size:
+      llap_container_size_max_limit = llap_container_size
+    Logger.info("Calculated llap_container_size : {0}, llap_container_size_max_limit : {1}, using following : "
+                "cap_available_for_daemons : {2}, node_manager_host_cnt : {3}, llap_container_size_raw : {4}, "
+                "llap_container_size_raw_max_limit : {5}, yarn_min_container_size : {6} "\
+                .format(llap_container_size, llap_container_size_max_limit, cap_available_for_daemons, node_manager_host_cnt,
+                        llap_container_size_raw, llap_container_size_raw_max_limit, yarn_min_container_size))
+    return llap_container_size, llap_container_size_max_limit
+
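
Continuing with the same made-up numbers, a sketch of how the daemon container size and its upper limit fall out of the capacity figures (assumed values only):

import math

def normalize_down(val, granularity):
    return math.floor(val / granularity) * granularity

cap_for_daemons_mb = 18432.0   # assumed capacity available for LLAP daemons
num_llap_nodes = 2             # assumed recommended node count
nm_host_count = 4              # assumed total NodeManager hosts
yarn_min_mb = 1024.0           # assumed YARN minimum container size

container_mb = normalize_down(cap_for_daemons_mb / num_llap_nodes, yarn_min_mb)     # 9216.0
container_mb_max = normalize_down(cap_for_daemons_mb / nm_host_count, yarn_min_mb)  # 4096.0
container_mb_max = max(container_mb_max, container_mb)  # max limit never below the recommendation
print(container_mb, container_mb_max)                   # 9216.0 9216.0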
+
+  def calculate_cap_available_for_llap_daemons(self, services, hosts, configurations):
+    llap_concurrency = 0
+    llap_slider_cap_percentage = self.get_llap_cap_percent_slider(services, configurations)
+    yarn_rm_mem_in_mb = self.get_yarn_rm_mem_in_mb(services)
+
+
+    node_manager_hosts = self.get_node_manager_hosts(services, hosts)
+    assert (node_manager_hosts is not None), "Information about NODEMANAGER not found in cluster."
+
+    total_cluster_cap = len(node_manager_hosts) * yarn_rm_mem_in_mb
+
+    total_llap_queue_size = float(llap_slider_cap_percentage) / 100 * total_cluster_cap
+
+    llap_daemon_container_size = self.get_hive_am_container_size(services)
+
+    yarn_min_container_size = self.get_yarn_min_container_size(services)
+
+    if 'hive.server2.tez.sessions.per.default.queue' in services['configurations'][self.HIVE_INTERACTIVE_SITE][
+      'properties']:
+      llap_concurrency = float(services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties'][
+                                 'hive.server2.tez.sessions.per.default.queue'])
+      assert (llap_concurrency > 0), "'hive.server2.tez.sessions.per.default.queue' current value : {0}. Expected value : > 0"\
+        .format(llap_concurrency)
+    else:
+      raise Fail("Couldn't retrieve Hive Server interactive's 'hive.server2.tez.sessions.per.default.queue' config.")
+
+    total_am_capacity_required = self._normalizeUp(llap_daemon_container_size, yarn_min_container_size) \
+                                 * llap_concurrency + self.calculate_slider_am_size(yarn_min_container_size)
+    cap_available_for_daemons = total_llap_queue_size - total_am_capacity_required
+    if cap_available_for_daemons < yarn_min_container_size :
+      raise Fail("'Capacity available for LLAP daemons'({0}) < 'YARN minimum container size'({1}). Invalid configuration detected. "
+                 "Increase LLAP queue size.".format(cap_available_for_daemons, yarn_min_container_size))
+    assert (
+      cap_available_for_daemons > 0), "'Capacity available for daemons' calculated value : {0}. Expected value : > 0".format(
+      cap_available_for_daemons)
+    Logger.info("Calculated cap_available_for_daemons : {0}, using following : llap_slider_cap_percentage : {1}, "
+                "yarn_rm_mem_in_mb : {2}, total_cluster_cap : {3}, total_llap_queue_size : {4}, llap_daemon_container_size"
+                " : {5}, yarn_min_container_size : {6}, llap_concurrency : {7}, total_am_capacity_required : {8}, "
+                .format(cap_available_for_daemons, llap_slider_cap_percentage, yarn_rm_mem_in_mb, total_cluster_cap,
+                        total_llap_queue_size, llap_daemon_container_size, yarn_min_container_size, llap_concurrency,
+                        total_am_capacity_required))
+    return cap_available_for_daemons
+
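
A small worked example of the capacity arithmetic above, with assumed cluster values (it yields the 18432 MB figure used in the earlier sketches):

import math

def normalize_up(val, granularity):
    # Round up to the nearest multiple of the YARN minimum container size.
    return math.ceil(val / granularity) * granularity

nm_hosts, nm_mem_mb = 4, 10240.0   # assumed cluster shape
llap_queue_pct = 50.0              # assumed llap_queue_capacity slider value
tez_am_mb = 1024.0                 # assumed tez.am.resource.memory.mb
yarn_min_mb = 1024.0               # assumed YARN minimum container size
concurrency = 1                    # assumed hive.server2.tez.sessions.per.default.queue
slider_am_mb = 1024.0              # assumed Slider AM container size

total_cluster_mb = nm_hosts * nm_mem_mb                  # 40960.0
llap_queue_mb = llap_queue_pct / 100 * total_cluster_mb  # 20480.0
am_overhead_mb = normalize_up(tez_am_mb, yarn_min_mb) * concurrency + slider_am_mb  # 2048.0
print(llap_queue_mb - am_overhead_mb)                    # 18432.0 MB left for daemons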
+  """
+  Calculates the Slider App Master size based on YARN's Minimum Container Size.
+  """
+  def calculate_slider_am_size(self, yarn_min_container_size):
+    if yarn_min_container_size > 1024:
+      return 1024
+    if 256 <= yarn_min_container_size <= 1024:
+      return yarn_min_container_size
+    return 256
+
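
The Slider AM sizing above is effectively a clamp; assuming the boundary values are meant to be inclusive, it can be restated as:

def slider_am_mb(yarn_min_container_mb):
    # Clamp the Slider AM container between 256 MB and 1024 MB; otherwise
    # follow the YARN minimum container size.
    return min(max(yarn_min_container_mb, 256), 1024)

for v in (128, 512, 2048):
    print(v, slider_am_mb(v))  # 128 -> 256, 512 -> 512, 2048 -> 1024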
+  """
+  Gets the YARN NodeManager memory in MB (yarn.nodemanager.resource.memory-mb).
+  """
+  def get_yarn_rm_mem_in_mb(self, services):
+    if 'yarn-site' in services['configurations'] and \
+        'yarn.nodemanager.resource.memory-mb' in services['configurations']['yarn-site']['properties']:
+      yarn_rm_mem_in_mb = float(
+        services['configurations']['yarn-site']['properties']['yarn.nodemanager.resource.memory-mb'])
+      assert (
+        yarn_rm_mem_in_mb > 0.0), "'yarn.nodemanager.resource.memory-mb' current value : {0}. Expected value : > 0".format(
+        yarn_rm_mem_in_mb)
+    else:
+      raise Fail(
+        "Couldn't retrieve YARN's 'yarn.nodemanager.resource.memory-mb' config.")
+    return yarn_rm_mem_in_mb
+
+  """
+  Gets HIVE App Master container size (tez.am.resource.memory.mb)
+  """
+  def get_hive_am_container_size(self, services):
+    llap_daemon_container_size = 0
+    if 'tez-interactive-site' in services['configurations'] and \
+        'tez.am.resource.memory.mb' in services['configurations']['tez-interactive-site']['properties']:
+      llap_daemon_container_size = float(
+        services['configurations']['tez-interactive-site']['properties']['tez.am.resource.memory.mb'])
+      assert (
+        llap_daemon_container_size > 0), "'tez.am.resource.memory.mb' current value : {0}. Expected value : > 0".format(
+        llap_daemon_container_size)
+    else:
+      raise Fail("Couldn't retrieve Hive Server interactive's 'tez.am.resource.memory.mb' config.")
+    return llap_daemon_container_size
+
+
+  """
+  Calculates suggested and maximum value for number of LLAP executors.
+  """
+  def calculate_llap_daemon_executors_count(self, services, llap_container_size):
+    cpu_per_nm_host = 0
+    exec_to_cache_ratio = 1.5
+
+    hive_container_size = self.get_tez_container_size(services)
+
+    if 'yarn.nodemanager.resource.cpu-vcores' in services['configurations']['yarn-site']['properties']:
+      cpu_per_nm_host = float(services['configurations']['yarn-site']['properties'][
+                                'yarn.nodemanager.resource.cpu-vcores'])
+      assert (cpu_per_nm_host > 0), "'yarn.nodemanager.resource.cpu-vcores' current value : {0}. Expected value : > 0"\
+        .format(cpu_per_nm_host)
+    else:
+      raise Fail("Couldn't retrieve YARN's 'yarn.nodemanager.resource.cpu-vcores' config.")
+
+    mem_per_executor = hive_container_size * exec_to_cache_ratio
+    if mem_per_executor > llap_container_size:
+      mem_per_executor = llap_container_size
+
+    num_executors_per_node_raw = math.floor(llap_container_size / mem_per_executor)
+    num_executors_per_node = min(num_executors_per_node_raw, cpu_per_nm_host)
+    # Allow 4x over-subscription of CPU as a max value
+    num_executors_per_node_max_limit = min(num_executors_per_node_raw, 4 * cpu_per_nm_host)
+    Logger.info("calculated num_executors_per_node: {0}, num_executors_per_node_max_limit : {1}, using following "
+                ":  hive_container_size : {2}, cpu_per_nm_host : {3}, mem_per_executor : {4}, num_executors_per_node_raw : {5}"
+                .format(num_executors_per_node, num_executors_per_node_max_limit, hive_container_size,
+                        cpu_per_nm_host, mem_per_executor, num_executors_per_node_raw))
+    return num_executors_per_node, num_executors_per_node_max_limit
+
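
A worked example of the executor-count calculation above, using assumed values for the container sizes and vcores:

import math

llap_container_mb = 9216.0   # assumed hive.llap.daemon.yarn.container.mb
tez_container_mb = 2048.0    # assumed hive.tez.container.size
vcores_per_nm = 8.0          # assumed yarn.nodemanager.resource.cpu-vcores

mem_per_executor = min(tez_container_mb * 1.5, llap_container_mb)  # 3072.0 (exec-to-cache ratio 1.5)
executors_raw = math.floor(llap_container_mb / mem_per_executor)   # 3.0
executors = min(executors_raw, vcores_per_nm)                      # 3.0, capped by vcores
executors_max = min(executors_raw, 4 * vcores_per_nm)              # 3.0, allowing 4x CPU over-subscription
print(executors, executors_max)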
+
+  """
+  Calculates suggested and maximum value for LLAP cache size per node.
+  """
+  def calculate_llap_cache_size_per_executor(self, services, llap_container_size, num_executors_per_node):
+    hive_container_size = self.get_tez_container_size(services)
+    cache_size_per_node = llap_container_size - (num_executors_per_node * hive_container_size)
+    # Reserved memory for minExecutors, which is 1.
+    cache_size_per_node_max_limit = llap_container_size - (1 * hive_container_size)
+    Logger.info("Calculated cache_size_per_node : {0}, cache_size_per_node_max_limit : {1}, using following : "
+                "hive_container_size : {2}, llap_container_size : {3}, num_executors_per_node : {4}"
+                .format(cache_size_per_node, cache_size_per_node_max_limit, hive_container_size, llap_container_size,
+                        num_executors_per_node))
+    return cache_size_per_node, cache_size_per_node_max_limit
+
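
And the cache-size calculation above, continued with the same assumed numbers:

llap_container_mb = 9216.0   # assumed daemon container size
tez_container_mb = 2048.0    # assumed hive.tez.container.size
executors = 3                # assumed executors per node

cache_mb = llap_container_mb - executors * tez_container_mb  # 3072.0 MB left for the IO cache
cache_mb_max = llap_container_mb - 1 * tez_container_mb      # 7168.0 MB if only minExecutors (1) is reserved
print(cache_mb, cache_mb_max)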
+
+  """
+  Calculates recommended heap size for LLAP app.
+  """
+  def calculate_llap_app_heap_size(self, services, num_executors_per_node):
+    hive_container_size = self.get_tez_container_size(services)
+    total_mem_for_executors = num_executors_per_node * hive_container_size
+    llap_app_heap_size = max(total_mem_for_executors * 0.8, total_mem_for_executors - 1024)
+    Logger.info("Calculated llap_app_heap_size : {0}, using following : hive_container_size : {1}, "
+                "total_mem_for_executors : {2}".format(llap_app_heap_size, hive_container_size, total_mem_for_executors))
+    return llap_app_heap_size
+
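
Finally, the heap-size rule above with the same assumed numbers:

executors = 3                # assumed executors per node
tez_container_mb = 2048.0    # assumed hive.tez.container.size

total_exec_mem = executors * tez_container_mb               # 6144.0
heap_mb = max(total_exec_mem * 0.8, total_exec_mem - 1024)  # 5120.0: keep ~20% or 1 GB of headroom
print(heap_mb)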
+
+  def _normalizeDown(self, val1, val2):
+    tmp = math.floor(val1 / val2)
+    return tmp * val2
+
+  def _normalizeUp(self, val1, val2):
+    numContainers = math.ceil(val1 / val2)
+    return numContainers * val2
 
   """
   Checks and (1). Creates 'llap' queue if only 'default' queue exist at leaf level and is consuming 100% capacity OR
@@ -183,7 +663,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
   def checkAndManageLlapQueue(self, services, configurations, llap_queue_name):
     DEFAULT_LLAP_QUEUE_CAP_PERCENT = 20
     putHiveInteractiveEnvProperty = self.putProperty(configurations, "hive-interactive-env", services)
-    putHiveInteractiveSiteProperty = self.putProperty(configurations, "hive-interactive-site", services)
+    putHiveInteractiveSiteProperty = self.putProperty(configurations, self.HIVE_INTERACTIVE_SITE, services)
     putCapSchedProperty = self.putProperty(configurations, "capacity-scheduler", services)
 
     capacitySchedulerProperties = self.getCapacitySchedulerProperties(services)
@@ -191,16 +671,15 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     if capacitySchedulerProperties:
       # Get the llap Cluster percentage used for 'llap' Queue creation
       if 'llap_queue_capacity' in services['configurations']['hive-interactive-env']['properties']:
-        llapSliderCapPercentage = int(
+        llap_slider_cap_percentage = int(
           services['configurations']['hive-interactive-env']['properties']['llap_queue_capacity'])
-        if llapSliderCapPercentage not in range(1,101):
-          Logger.debug("Adjusting HIVE 'llap_queue_capacity' from {0}% to {1}%".format(llapSliderCapPercentage, DEFAULT_LLAP_QUEUE_CAP_PERCENT))
-          llapSliderCapPercentage = DEFAULT_LLAP_QUEUE_CAP_PERCENT  # Set the default value to 20.
-          putHiveInteractiveEnvProperty('llap_queue_capacity', llapSliderCapPercentage)
+        if llap_slider_cap_percentage not in range(1,101):
+          Logger.info("Adjusting HIVE 'llap_queue_capacity' from {0}% to {1}%".format(llap_slider_cap_percentage, DEFAULT_LLAP_QUEUE_CAP_PERCENT))
+          llap_slider_cap_percentage = DEFAULT_LLAP_QUEUE_CAP_PERCENT  # Set the default value to 20.
+          putHiveInteractiveEnvProperty('llap_queue_capacity', llap_slider_cap_percentage)
       else:
         Logger.error("Problem retrieving LLAP Queue Capacity. Skipping creating {0} queue".format(llap_queue_name))
         return
-
       leafQueueNames = self.getAllYarnLeafQueues(capacitySchedulerProperties)
       capSchedConfigKeys = capacitySchedulerProperties.keys()
 
@@ -218,21 +697,29 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
       if 'yarn.scheduler.capacity.root.'+llap_queue_name+'.capacity' in capSchedConfigKeys:
         currLlapQueueCap = capacitySchedulerProperties.get('yarn.scheduler.capacity.root.'+llap_queue_name+'.capacity')
 
+      if self.HIVE_INTERACTIVE_SITE in services['configurations'] and \
+          'hive.llap.daemon.queue.name' in services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties']:
+        llap_daemon_selected_queue_name =  services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties']['hive.llap.daemon.queue.name']
+      else:
+        Logger.debug("Couldn't retrive 'hive.llap.daemon.queue.name' property. Skipping adjusting queues.")
+        return
       updated_cap_sched_configs = ''
 
+      enabled_hive_int_in_changed_configs = self.are_config_props_in_changed_configs(services, "hive-interactive-env",
+                                                                                   set(['enable_hive_interactive']), False)
       """
       We create OR "modify 'llap' queue 'state and/or capacity' " based on below conditions:
        - if only 1 queue exists at root level and is 'default' queue and has 100% cap -> Create 'llap' queue,  OR
        - if 2 queues exists at root level ('llap' and 'default') :
-           - 'llap' queue state is STOPPED -> Modify 'llap' queue state to RUNNING, adjust capacity, OR
-           - 'llap' queue state is RUNNING and 'llap_queue_capacity' prop != 'llap' queue current running capacity ->
+           - Queue selected is 'llap' and state is STOPPED -> Modify 'llap' queue state to RUNNING, adjust capacity, OR
+           - Queue selected is 'llap', state is RUNNING and 'llap_queue_capacity' prop != 'llap' queue current running capacity ->
               Modify 'llap' queue capacity to 'llap_queue_capacity'
       """
       if 'default' in leafQueueNames and \
         ((len(leafQueueNames) == 1 and int(yarn_default_queue_capacity) == 100) or \
         ((len(leafQueueNames) == 2 and llap_queue_name in leafQueueNames) and \
-           (currLlapQueueState == 'STOPPED' or (currLlapQueueState == 'RUNNING' and currLlapQueueCap != llapSliderCapPercentage)))):
-        adjusted_default_queue_cap = str(int(yarn_default_queue_capacity) - llapSliderCapPercentage)
+           ((currLlapQueueState == 'STOPPED' and enabled_hive_int_in_changed_configs) or (currLlapQueueState == 'RUNNING' and currLlapQueueCap != llap_slider_cap_percentage)))):
+        adjusted_default_queue_cap = str(int(yarn_default_queue_capacity) - llap_slider_cap_percentage)
         for prop, val in capacitySchedulerProperties.items():
           if llap_queue_name not in prop:
             if prop == 'yarn.scheduler.capacity.root.queues':
@@ -247,7 +734,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
             elif prop.startswith('yarn.') and '.llap.' not in prop:
               updated_cap_sched_configs = updated_cap_sched_configs + prop + "=" + val + "\n"
 
-        llapSliderCapPercentage = str(llapSliderCapPercentage)
+        llap_slider_cap_percentage = str(llap_slider_cap_percentage)
         hive_user = '*'  # Open to all
         if 'hive_user' in services['configurations']['hive-env']['properties']:
           hive_user = services['configurations']['hive-env']['properties']['hive_user']
@@ -259,9 +746,9 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
                                     + "yarn.scheduler.capacity.root." + llap_queue_name + ".ordering-policy=fifo\n" \
                                     + "yarn.scheduler.capacity.root." + llap_queue_name + ".minimum-user-limit-percent=100\n" \
                                     + "yarn.scheduler.capacity.root." + llap_queue_name + ".maximum-capacity=" \
-                                    + llapSliderCapPercentage + "\n" \
+                                    + llap_slider_cap_percentage + "\n" \
                                     + "yarn.scheduler.capacity.root." + llap_queue_name + ".capacity=" \
-                                    + llapSliderCapPercentage + "\n" \
+                                    + llap_slider_cap_percentage + "\n" \
                                     + "yarn.scheduler.capacity.root." + llap_queue_name + ".acl_submit_applications=" \
                                     + hive_user + "\n" \
                                     + "yarn.scheduler.capacity.root." + llap_queue_name + ".acl_administer_queue=" \
@@ -272,9 +759,9 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
           putCapSchedProperty("capacity-scheduler", updated_cap_sched_configs)
           if len(leafQueueNames) == 1: # 'llap' queue didn't exist before
             Logger.info("Created YARN Queue : '{0}' with capacity : {1}%. Adjusted default queue capacity to : {2}%" \
-                      .format(llap_queue_name, llapSliderCapPercentage, adjusted_default_queue_cap))
+                      .format(llap_queue_name, llap_slider_cap_percentage, adjusted_default_queue_cap))
           else: # Queue existed, only adjustments done.
-            Logger.info("Adjusted YARN Queue : '{0}'. Current capacity : {1}%. State: RUNNING.".format(llap_queue_name, llapSliderCapPercentage))
+            Logger.info("Adjusted YARN Queue : '{0}'. Current capacity : {1}%. State: RUNNING.".format(llap_queue_name, llap_slider_cap_percentage))
             Logger.info("Adjusted 'default' queue capacity to : {0}%".format(adjusted_default_queue_cap))
 
           # Update Hive 'hive.llap.daemon.queue.name' prop to use 'llap' queue.
@@ -296,10 +783,8 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
   """
   def checkAndStopLlapQueue(self, services, configurations, llap_queue_name):
     putCapSchedProperty = self.putProperty(configurations, "capacity-scheduler", services)
-    putHiveInteractiveSiteProperty = self.putProperty(configurations, "hive-interactive-site", services)
-
+    putHiveInteractiveSiteProperty = self.putProperty(configurations, self.HIVE_INTERACTIVE_SITE, services)
     capacitySchedulerProperties = self.getCapacitySchedulerProperties(services)
-
     updated_default_queue_configs = ''
     updated_llap_queue_configs = ''
     if capacitySchedulerProperties:
@@ -314,7 +799,6 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
         else:
           Logger.error("{0} queue 'state' property not present in capacity scheduler. Skipping adjusting queues.".format(llap_queue_name))
           return
-
         if currLlapQueueState == 'RUNNING':
           DEFAULT_MAX_CAPACITY = '100'
           for prop, val in capacitySchedulerProperties.items():
@@ -362,7 +846,7 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
   Checks and sets the 'Hive Server Interactive' 'hive.llap.daemon.queue.name' config.
   """
   def setLlapDaemonQueueName(self, services, configurations):
-    putHiveInteractiveSitePropertyAttribute = self.putPropertyAttribute(configurations, "hive-interactive-site")
+    putHiveInteractiveSitePropertyAttribute = self.putPropertyAttribute(configurations, self.HIVE_INTERACTIVE_SITE)
     capacitySchedulerProperties = dict()
 
     # Read 'capacity-scheduler' from configurations if we modified and added recommendation to it, as part of current
@@ -493,4 +977,4 @@ class HDP25StackAdvisor(HDP24StackAdvisor):
     hosts_for_component = []
     if serviceName in servicesList:
       hosts_for_component = [component["hostnames"] for component in componentsList if component["component_name"] == componentName][0]
-    return hosts_for_component
+    return hosts_for_component
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/29179e76/ambari-server/src/test/python/stacks/2.5/HIVE/test_hive_server_int.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/HIVE/test_hive_server_int.py b/ambari-server/src/test/python/stacks/2.5/HIVE/test_hive_server_int.py
index 44759a9..5712995 100644
--- a/ambari-server/src/test/python/stacks/2.5/HIVE/test_hive_server_int.py
+++ b/ambari-server/src/test/python/stacks/2.5/HIVE/test_hive_server_int.py
@@ -439,7 +439,7 @@ class TestHiveServerInteractive(RMFTestCase):
     mock_get_llap_app_status_data.return_value = llap_app_json
 
     status = self.hsi.check_llap_app_status(self.llap_app_name, -1)
-    self.assertEqual(status, True)
+    self.assertEqual(status, False)
 
   @patch("time.sleep")
   @patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info')
@@ -450,7 +450,7 @@ class TestHiveServerInteractive(RMFTestCase):
     mock_get_llap_app_status_data.return_value = llap_app_json
 
     status = self.hsi.check_llap_app_status(self.llap_app_name, 0)
-    self.assertEqual(status, True)
+    self.assertEqual(status, False)
 
   @patch("time.sleep")
   @patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info')
@@ -461,7 +461,7 @@ class TestHiveServerInteractive(RMFTestCase):
     mock_get_llap_app_status_data.return_value = llap_app_json
 
     status = self.hsi.check_llap_app_status(self.llap_app_name, 2)
-    self.assertEqual(status, True)
+    self.assertEqual(status, False)