Posted to commits@ambari.apache.org by mp...@apache.org on 2015/01/05 19:25:33 UTC

ambari git commit: AMBARI-8983. Create upgrade catalogs for 2.0 and 2.1 stacks. (mpapirkovskyy)

Repository: ambari
Updated Branches:
  refs/heads/trunk ccda41901 -> f28975c41


AMBARI-8983. Create upgrade catalogs for 2.0 and 2.1 stacks. (mpapirkovskyy)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f28975c4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f28975c4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f28975c4

Branch: refs/heads/trunk
Commit: f28975c41dc8de71b4f74e23f80522a055dfa69a
Parents: ccda419
Author: Myroslav Papirkovskyy <mp...@hortonworks.com>
Authored: Mon Jan 5 20:22:19 2015 +0200
Committer: Myroslav Papirkovskyy <mp...@hortonworks.com>
Committed: Mon Jan 5 20:22:19 2015 +0200

----------------------------------------------------------------------
 ambari-server/src/main/python/upgradeHelper.py  | 109 +++++--
 .../catalog/UpgradeCatalog_2.0_to_2.2.json      | 272 +++++++++++++++++
 .../catalog/UpgradeCatalog_2.1_to_2.2.json      | 289 +++++++++++++++++++
 3 files changed, 652 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f28975c4/ambari-server/src/main/python/upgradeHelper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/upgradeHelper.py b/ambari-server/src/main/python/upgradeHelper.py
index 57592ad..7a998a3 100644
--- a/ambari-server/src/main/python/upgradeHelper.py
+++ b/ambari-server/src/main/python/upgradeHelper.py
@@ -19,6 +19,63 @@ limitations under the License.
 '''
 
 
+"""
+Upgrade catalog file format description:
+
+
+Format version 1.0
+
+Global section description:
+  STACKNAME - name of the stack, for example HDP
+  OLDVERSION - version of the stack to upgrade from, used by the fromStack script argument
+  NEWVERSION - version of the stack to upgrade to, used by the toStack script argument
+
+Sub-section options:
+  config-types - contains global per-config-type settings
+    merged-copy - merges the latest server properties with the properties defined in the "properties" section;
+                  without this option the server properties are overwritten by the properties defined in the "properties" section
+
+Sub-section properties - contains the property definitions
+Sub-section property-mapping (optional) - maps old property names to new ones for properties renamed in NEWVERSION
+
+Example:
+
+{
+  "version": "1.0",
+  "stacks": [
+    {
+      "name": "STACKNAME",
+      "old-version": "OLDVERSION",
+      "target-version": "NEWVERSION",
+      "options": {
+        "config-types": {
+          "CONFIGTYPE1": {
+            "merged-copy": "yes"
+          }
+        }
+      },
+      "properties": {
+        "CONFIGTYPE1": {
+          "some_property": "some property value",
+          "some_second_property: {
+             "remove": "yes"
+          },
+          "template_property": {
+           "value": "{TEMPLATE_TAG}",
+           "template": "yes"
+          }
+        }
+      },
+     "property-mapping": {
+       "old-property-name": "new-property-name"
+     }
+    }
+  ]
+}
+
+More examples available in ambari-server/src/main/resources/upgrade/catalog/
+"""
+
 import getpass
 import optparse
 from pprint import pprint
@@ -174,7 +231,7 @@ class Options(Const):
 # ==============================
 #    Catalog classes definition
 # ==============================
-class UpgradeCatalogFarm(object):
+class UpgradeCatalogFactory(object):
 
    # versions of catalog which is currently supported
   _supported_catalog_versions = ["1.0"]
@@ -235,7 +292,7 @@ class UpgradeCatalog(object):
   # private variables
   _json_catalog = None
   _properties_catalog = None
-  _properties_map_catalog = None
+  _properties_map_catalog = {}  # defaults to an empty dictionary
   _version = None
   _search_pattern = None
 
@@ -559,9 +616,12 @@ def add_services():
              validate=True, validate_expect_body=False, request_type="POST")
 
 
-def update_config(properties, config_type):
+def update_config(properties, config_type, attributes=None):
   tag = "version" + str(int(time.time() * 1000))
   properties_payload = {"Clusters": {"desired_config": {"type": config_type, "tag": tag, "properties": properties}}}
+  if attributes is not None:
+    properties_payload["Clusters"]["desired_config"]["properties_attributes"] = attributes
+
   expect_body = config_type != "cluster-env"  # ToDo: make exceptions more flexible
 
   curl(Options.CLUSTER_URL, request_type="PUT", data=properties_payload, validate=True,
@@ -582,14 +642,20 @@ def get_zookeeper_quorum():
 def get_config(cfg_type):
   tag, structured_resp = get_config_resp(cfg_type)
   properties = None
+  properties_attributes = None
+
   if 'items' in structured_resp:
     for item in structured_resp['items']:
       if (tag == item['tag']) or (cfg_type == item['type']):
-        properties = item['properties']
+        if 'properties' in item:
+          properties = item['properties']
+        if 'properties_attributes' in item:
+          properties_attributes = item['properties_attributes']
+        break
   if properties is None:
     raise FatalException(-1, "Unable to read configuration for type " + cfg_type + " and tag " + tag)
 
-  return properties
+  return properties, properties_attributes
 
 
 def parse_config_resp(resp):
@@ -607,7 +673,7 @@ def get_config_resp(cfg_type, error_if_na=True, parsed=False, tag=None):
   CONFIG_URL_FORMAT = Options.CLUSTER_URL + '/configurations?type={0}&tag={1}'
 
   # Read the config version
-  if tag in None:
+  if tag is None:
     structured_resp = curl(Options.CLUSTER_URL, validate=True, validate_expect_body=True, parse=True, simulate=False)
 
     if 'Clusters' in structured_resp:
@@ -654,7 +720,8 @@ def get_config_resp_all():
     all_options)
 
   for item in all_options:
-    desired_configs[item["type"]] = item["properties"]
+    if CatConst.STACK_PROPERTIES in item:  # a config item might not contain any properties
+      desired_configs[item["type"]] = item["properties"]
 
   return desired_configs
 
@@ -682,9 +749,10 @@ def modify_config_item(config_type, catalog):
   catalog.set_substitution_handler(_substitute)
 
   try:
-    properties_latest = rename_all_properties(get_config(config_type), catalog.property_map_catalog)
+    properties_latest, properties_attributes_latest = rename_all_properties(get_config(config_type), catalog.property_map_catalog)
   except Exception as e:
     properties_latest = {}
+    properties_attributes_latest = None
 
   properties_copy = catalog.get_properties(config_type)
   is_merged_copy = CatConst.MERGED_COPY_TAG in catalog.config_groups.get(config_type) \
@@ -699,7 +767,7 @@ def modify_config_item(config_type, catalog):
   if is_merged_copy:  # Append configs to existed ones
     tag, structured_resp = get_config_resp(config_type, False)
     if structured_resp is not None:
-      update_config_using_existing_properties(config_type, properties_copy, properties_latest, catalog)
+      update_config_using_existing_properties(config_type, properties_copy, properties_latest, properties_attributes_latest, catalog)
   else:  # Rewrite/create config items
     update_config(catalog.get_properties_as_dict(properties_copy), config_type)
 
@@ -710,7 +778,7 @@ def modify_configs():
   else:
     config_type = None
 
-  catalog_farm = UpgradeCatalogFarm(Options.OPTIONS.upgrade_json)  # Load upgrade catalog
+  catalog_farm = UpgradeCatalogFactory(Options.OPTIONS.upgrade_json)  # Load upgrade catalog
   catalog = catalog_farm.get_catalog(Options.OPTIONS.from_stack, Options.OPTIONS.to_stack)  # get desired version of catalog
 
   if catalog is None:
@@ -735,14 +803,9 @@ def rename_all_properties(properties, name_mapping):
   return properties
 
 
-def update_config_using_existing(conf_type, properties_template, catalog):
-  site_properties = get_config(conf_type)
-  update_config_using_existing_properties(conf_type, properties_template, site_properties, catalog)
-
-
 # properties template - passed as dict from UpgradeCatalog
 def update_config_using_existing_properties(conf_type, properties_template,
-                                            site_properties, catalog):
+                                            site_properties, properties_attributes_latest, catalog):
   keys_processed = []
   keys_to_delete = []
   properties_parsed = catalog.get_properties_as_dict(properties_template)
@@ -759,7 +822,17 @@ def update_config_using_existing_properties(conf_type, properties_template,
   for key in keys_to_delete:
     del properties_parsed[key]
 
-  update_config(properties_parsed, conf_type)
+  # check property attributes list
+  if properties_attributes_latest is not None:
+    for key in properties_attributes_latest:
+      properties_attributes_latest[key] = dict(filter(
+        lambda (item_key, item_value): item_key not in keys_to_delete,
+        zip(properties_attributes_latest[key].keys(), properties_attributes_latest[key].values())
+      ))
+
+
+
+  update_config(properties_parsed, conf_type, attributes=properties_attributes_latest)
 
 
 def backup_configs(conf_type=None):
@@ -1015,7 +1088,7 @@ def verify_configuration():
   else:
     config_type = None
 
-  catalog_farm = UpgradeCatalogFarm(Options.OPTIONS.upgrade_json)  # Load upgrade catalog
+  catalog_farm = UpgradeCatalogFactory(Options.OPTIONS.upgrade_json)  # Load upgrade catalog
   catalog = catalog_farm.get_catalog(Options.OPTIONS.from_stack, Options.OPTIONS.to_stack)  # get desired version of catalog
 
   if catalog is None:

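For readers following the catalog format documented in the new upgradeHelper.py docstring above, the snippet below is a minimal, self-contained sketch of the merged-copy, property-mapping and remove-marker semantics. Every name in it (apply_catalog_to_config and the sample properties) is illustrative and is not part of upgradeHelper.py; the real script additionally handles property templates, property attributes and the Ambari REST calls.

# Illustrative sketch only -- not the actual upgradeHelper.py implementation.
import copy
import json


def apply_catalog_to_config(current_properties, catalog_properties, property_mapping=None):
    """Compute the config that would be written back for a "merged-copy" config type."""
    merged = copy.deepcopy(current_properties)

    # 1. Rename properties that changed their name in the target stack version
    #    (the "property-mapping" sub-section of the catalog).
    for old_name, new_name in (property_mapping or {}).items():
        if old_name in merged:
            merged[new_name] = merged.pop(old_name)

    # 2. Overlay the catalog values; a {"remove": "yes"} marker deletes the property.
    for name, value in catalog_properties.items():
        if isinstance(value, dict) and value.get("remove") == "yes":
            merged.pop(name, None)
        else:
            merged[name] = value

    return merged


if __name__ == "__main__":
    server_props = {"hbase.hstore.flush.retries.number": "120",
                    "hbase.hregion.memstore.block.multiplier": "2"}
    catalog_props = {"hbase.hregion.memstore.block.multiplier": "4",
                     "hbase.hstore.flush.retries.number": {"remove": "yes"}}
    print(json.dumps(apply_catalog_to_config(server_props, catalog_props), indent=2, sort_keys=True))

Without the merged-copy option the same catalog block simply replaces the server-side config, which corresponds to the non-merged branch of modify_config_item() calling update_config() directly.
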
http://git-wip-us.apache.org/repos/asf/ambari/blob/f28975c4/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.json
new file mode 100644
index 0000000..70ae3dc
--- /dev/null
+++ b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.json
@@ -0,0 +1,272 @@
+{
+  "version": "1.0",
+  "stacks": [
+    {
+      "name": "HDP",
+      "old-version": "2.0",
+      "target-version": "2.2",
+      "options": {
+        "config-types": {
+          "core-site": {
+            "merged-copy": "yes"
+          },
+          "hdfs-site": {
+            "merged-copy": "yes"
+          },
+          "hbase-site": {
+            "merged-copy": "yes"
+          },
+          "hive-site": {
+            "merged-copy": "yes"
+          },
+          "yarn-site": {
+            "merged-copy": "yes"
+          },
+          "mapred-site": {
+            "merged-copy": "yes"
+          },
+          "oozie-site": {
+            "merged-copy": "yes"
+          },
+          "webhcat-site": {
+            "merged-copy": "yes"
+          }
+        }
+      },
+      "properties": {
+        "webhcat-site": {
+          "templeton.storage.class": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage",
+          "templeton.port": "50111"
+        },
+        "oozie-site": {
+          "oozie.authentication.simple.anonymous.allowed": "true",
+          "oozie.service.coord.check.maximum.frequency": "false",
+          "oozie.service.ELService.ext.functions.coord-action-create": "now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,user=org.apache.oozie.coord.CoordELFunctions#coord_user",
+          "oozie.service.ELService.ext.functions.coord-action-create-inst": "now=org.apache.oozie.extensions.OozieELExtensions#ph2_now_inst,today=org.apache.oozie.extensions.OozieELExtensions#ph2_today_inst,yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday_inst,currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth_inst,lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth_inst,currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear_inst,lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear_inst,latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,user=org.apache.oozie.coord.CoordELFunctions#coord_user",
+          "oozie.service.ELService.ext.functions.coord-action-start": "now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,latest=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latest,future=org.apache.oozie.coord.CoordELFunctions#ph3_coord_future,dataIn=org.apache.oozie.extensions.OozieELExtensions#ph3_dataIn,instanceTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_nominalTime,dateOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dateOffset,formatTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_formatTime,user=org.apache.oozie.coord.CoordELFu
 nctions#coord_user",
+          "oozie.service.ELService.ext.functions.coord-job-submit-data": "now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,dataIn=org.apache.oozie.extensions.OozieELExtensions#ph1_dataIn_echo,instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_wrap,formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,dateOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dateOffset_echo,user=org.apache.oozie.coord.CoordELFunctions#coord_user",
+          "oozie.service.ELService.ext.functions.coord-job-submit-instances": "now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo",
+          "oozie.service.ELService.ext.functions.coord-sla-create": "instanceTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_nominalTime,user=org.apache.oozie.coord.CoordELFunctions#coord_user",
+          "oozie.service.ELService.ext.functions.coord-sla-submit": "instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_fixed,user=org.apache.oozie.coord.CoordELFunctions#coord_user",
+          "oozie.service.HadoopAccessorService.kerberos.enabled": "false",
+          "oozie.service.HadoopAccessorService.supported.filesystems": "*",
+          "oozie.service.SchemaService.wf.ext.schemas": "shell-action-0.1.xsd,shell-action-0.2.xsd,shell-action-0.3.xsd,email-action-0.1.xsd,email-action-0.2.xsd,hive-action-0.2.xsd,hive-action-0.3.xsd,hive-action-0.4.xsd,hive-action-0.5.xsd,sqoop-action-0.2.xsd,sqoop-action-0.3.xsd,sqoop-action-0.4.xsd,ssh-action-0.1.xsd,ssh-action-0.2.xsd,distcp-action-0.1.xsd,distcp-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd",
+          "oozie.services.ext": "org.apache.oozie.service.JMSAccessorService,org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService"
+        },
+        "mapred-site": {
+          "mapreduce.job.emit-timeline-data": "false",
+          "mapreduce.jobhistory.bind-host": "0.0.0.0",
+          "mapreduce.reduce.shuffle.fetch.retry.enabled": "1",
+          "mapreduce.reduce.shuffle.fetch.retry.interval-ms": "1000",
+          "mapreduce.reduce.shuffle.fetch.retry.timeout-ms": "30000",
+          "mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
+          "mapreduce.admin.reduce.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
+          "mapreduce.map.java.opts": "-Xmx546m",
+          "mapreduce.map.memory.mb": "682",
+          "mapreduce.reduce.java.opts": "-Xmx546m",
+          "mapreduce.task.io.sort.mb": "273",
+          "yarn.app.mapreduce.am.admin-command-opts": "-Dhdp.version=${hdp.version}",
+          "yarn.app.mapreduce.am.command-opts": "-Xmx546m -Dhdp.version=${hdp.version}",
+          "yarn.app.mapreduce.am.resource.mb": "682",
+          "mapreduce.application.framework.path": "/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework",
+          "mapreduce.application.classpath": "$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
+          "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64"
+        },
+        "yarn-site": {
+          "yarn.timeline-service.leveldb-timeline-store.path": "/var/log/hadoop-yarn/timeline",
+          "yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms": "300000",
+          "yarn.timeline-service.store-class": "org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore",
+          "yarn.timeline-service.ttl-enable": "true",
+          "yarn.timeline-service.ttl-ms": "2678400000",
+          "yarn.timeline-service.generic-application-history.store-class": "org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore",
+          "hadoop.registry.rm.enabled": "false",
+          "yarn.client.nodemanager-connect.max-wait-ms": "900000",
+          "yarn.client.nodemanager-connect.retry-interval-ms": "10000",
+          "yarn.node-labels.fs-store.retry-policy-spec": "2000, 500",
+          "yarn.node-labels.fs-store.root-dir": "/system/yarn/node-labels",
+          "yarn.node-labels.manager-class": "org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager",
+          "yarn.nodemanager.bind-host": "0.0.0.0",
+          "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage": "90",
+          "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000",
+          "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "hadoop-yarn",
+          "yarn.nodemanager.linux-container-executor.cgroups.mount": "false",
+          "yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage": "false",
+          "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler",
+          "yarn.nodemanager.log-aggregation.debug-enabled": "false",
+          "yarn.nodemanager.log-aggregation.num-log-files-per-app": "30",
+          "yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds": "-1",
+          "yarn.nodemanager.recovery.dir": "/var/log/hadoop-yarn/nodemanager/recovery-state",
+          "yarn.nodemanager.recovery.enabled": "false",
+          "yarn.nodemanager.resource.cpu-vcores": "1",
+          "yarn.nodemanager.resource.percentage-physical-cpu-limit": "100",
+          "yarn.resourcemanager.bind-host": "0.0.0.0",
+          "yarn.resourcemanager.connect.max-wait.ms": "900000",
+          "yarn.resourcemanager.connect.retry-interval.ms": "30000",
+          "yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500",
+          "yarn.resourcemanager.fs.state-store.uri": " ",
+          "yarn.resourcemanager.ha.enabled": "false",
+          "yarn.resourcemanager.recovery.enabled": "false",
+          "yarn.resourcemanager.state-store.max-completed-applications": "${yarn.resourcemanager.max-completed-applications}",
+          "yarn.resourcemanager.store.class": "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore",
+          "yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size": "10",
+          "yarn.resourcemanager.system-metrics-publisher.enabled": "true",
+          "yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled": "false",
+          "yarn.resourcemanager.work-preserving-recovery.enabled": "false",
+          "yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms": "10000",
+          "yarn.resourcemanager.zk-acl": "world:anyone:rwcda",
+          "yarn.resourcemanager.zk-address": "localhost:2181",
+          "yarn.resourcemanager.zk-num-retries": "1000",
+          "yarn.resourcemanager.zk-retry-interval-ms": "1000",
+          "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore",
+          "yarn.resourcemanager.zk-timeout-ms": "10000",
+          "yarn.timeline-service.bind-host": "0.0.0.0",
+          "yarn.timeline-service.client.max-retries": "30",
+          "yarn.timeline-service.client.retry-interval-ms": "1000",
+          "yarn.timeline-service.enabled": "true",
+          "yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true",
+          "yarn.timeline-service.http-authentication.type": "simple",
+          "yarn.timeline-service.leveldb-timeline-store.read-cache-size": "104857600",
+          "yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000",
+          "yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000"
+        },
+        "hive-site": {
+          "hive.execution.engine": "mr",
+          "hive.exec.failure.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
+          "hive.exec.post.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
+          "hive.exec.pre.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
+          "datanucleus.cache.level2.type": "none",
+          "hive.auto.convert.sortmerge.join.to.mapjoin": "false",
+          "hive.cbo.enable": "true",
+          "hive.cli.print.header": "false",
+          "hive.cluster.delegation.token.store.class": "org.apache.hadoop.hive.thrift.ZooKeeperTokenStore",
+          "hive.cluster.delegation.token.store.zookeeper.znode": "/hive/cluster/delegation",
+          "hive.compactor.abortedtxn.threshold": "1000",
+          "hive.compactor.check.interval": "300L",
+          "hive.compactor.delta.num.threshold": "10",
+          "hive.compactor.delta.pct.threshold": "0.1f",
+          "hive.compactor.initiator.on": "false",
+          "hive.compactor.worker.threads": "0",
+          "hive.compactor.worker.timeout": "86400L",
+          "hive.compute.query.using.stats": "true",
+          "hive.conf.restricted.list": "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role",
+          "hive.convert.join.bucket.mapjoin.tez": "false",
+          "hive.enforce.sortmergebucketmapjoin": "true",
+          "hive.exec.compress.intermediate": "false",
+          "hive.exec.compress.output": "false",
+          "hive.exec.dynamic.partition": "true",
+          "hive.exec.dynamic.partition.mode": "nonstrict",
+          "hive.exec.max.created.files": "100000",
+          "hive.exec.max.dynamic.partitions": "5000",
+          "hive.exec.max.dynamic.partitions.pernode": "2000",
+          "hive.exec.orc.compression.strategy": "SPEED",
+          "hive.exec.orc.default.compress": "ZLIB",
+          "hive.exec.orc.default.stripe.size": "67108864",
+          "hive.exec.parallel": "false",
+          "hive.exec.parallel.thread.number": "8",
+          "hive.exec.reducers.bytes.per.reducer": "67108864",
+          "hive.exec.reducers.max": "1009",
+          "hive.exec.scratchdir": "/tmp/hive",
+          "hive.exec.submit.local.task.via.child": "true",
+          "hive.exec.submitviachild": "false",
+          "hive.fetch.task.aggr": "false",
+          "hive.fetch.task.conversion": "more",
+          "hive.fetch.task.conversion.threshold": "1073741824",
+          "hive.limit.optimize.enable": "true",
+          "hive.limit.pushdown.memory.usage": "0.04",
+          "hive.map.aggr.hash.force.flush.memory.threshold": "0.9",
+          "hive.map.aggr.hash.min.reduction": "0.5",
+          "hive.map.aggr.hash.percentmemory": "0.5",
+          "hive.mapjoin.optimized.hashtable": "true",
+          "hive.merge.mapfiles": "true",
+          "hive.merge.mapredfiles": "false",
+          "hive.merge.orcfile.stripe.level": "true",
+          "hive.merge.rcfile.block.level": "true",
+          "hive.merge.size.per.task": "256000000",
+          "hive.merge.smallfiles.avgsize": "16000000",
+          "hive.merge.tezfiles": "false",
+          "hive.metastore.authorization.storage.checks": "false",
+          "hive.metastore.client.connect.retry.delay": "5s",
+          "hive.metastore.connect.retries": "24",
+          "hive.metastore.failure.retries": "24",
+          "hive.metastore.kerberos.keytab.file": "/etc/security/keytabs/hive.service.keytab",
+          "hive.metastore.kerberos.principal": "hive/_HOST@EXAMPLE.COM",
+          "hive.metastore.server.max.threads": "100000",
+          "hive.optimize.constant.propagation": "true",
+          "hive.optimize.metadataonly": "true",
+          "hive.optimize.null.scan": "true",
+          "hive.optimize.sort.dynamic.partition": "false",
+          "hive.orc.compute.splits.num.threads": "10",
+          "hive.orc.splits.include.file.footer": "false",
+          "hive.prewarm.enabled": "false",
+          "hive.prewarm.numcontainers": "10",
+          "hive.security.metastore.authenticator.manager": "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
+          "hive.security.metastore.authorization.auth.reads": "true",
+          "hive.server2.allow.user.substitution": "true",
+          "hive.server2.authentication.spnego.keytab": "HTTP/_HOST@EXAMPLE.COM",
+          "hive.server2.authentication.spnego.principal": "/etc/security/keytabs/spnego.service.keytab",
+          "hive.server2.logging.operation.enabled": "true",
+          "hive.server2.logging.operation.log.location": "${system:java.io.tmpdir}/${system:user.name}/operation_logs",
+          "hive.server2.table.type.mapping": "CLASSIC",
+          "hive.server2.tez.default.queues": "default",
+          "hive.server2.tez.sessions.per.default.queue": "1",
+          "hive.server2.thrift.http.path": "cliservice",
+          "hive.server2.thrift.http.port": "10001",
+          "hive.server2.thrift.max.worker.threads": "500",
+          "hive.server2.thrift.sasl.qop": "auth",
+          "hive.server2.transport.mode": "binary",
+          "hive.server2.use.SSL": "false",
+          "hive.smbjoin.cache.rows": "10000",
+          "hive.stats.autogather": "true",
+          "hive.stats.dbclass": "fs",
+          "hive.stats.fetch.column.stats": "false",
+          "hive.stats.fetch.partition.stats": "true",
+          "hive.support.concurrency": "false",
+          "hive.tez.auto.reducer.parallelism": "false",
+          "hive.tez.cpu.vcores": "-1",
+          "hive.tez.dynamic.partition.pruning": "true",
+          "hive.tez.dynamic.partition.pruning.max.data.size": "104857600",
+          "hive.tez.dynamic.partition.pruning.max.event.size": "1048576",
+          "hive.tez.input.format": "org.apache.hadoop.hive.ql.io.HiveInputFormat",
+          "hive.tez.log.level": "INFO",
+          "hive.tez.max.partition.factor": "2.0",
+          "hive.tez.min.partition.factor": "0.25",
+          "hive.tez.smb.number.waves": "0.5",
+          "hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager",
+          "hive.txn.max.open.batch": "1000",
+          "hive.txn.timeout": "300",
+          "hive.user.install.directory": "/user/",
+          "hive.vectorized.execution.reduce.enabled": "false",
+          "hive.vectorized.groupby.checkinterval": "4096",
+          "hive.vectorized.groupby.flush.percent": "0.1",
+          "hive.vectorized.groupby.maxentries": "100000",
+          "hive.zookeeper.client.port": "2181",
+          "hive.zookeeper.namespace": "hive_zookeeper_namespace",
+          "hive.auto.convert.join.noconditionaltask.size": "238026752",
+          "hive.metastore.client.socket.timeout": "1800s",
+          "hive.optimize.reducededuplication.min.reducer": "4",
+          "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory",
+          "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider,org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly",
+          "hive.server2.support.dynamic.service.discovery": "true",
+          "hive.tez.container.size": "682",
+          "hive.tez.java.opts": "-server -Xmx546m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps",
+          "fs.file.impl.disable.cache": "true",
+          "fs.hdfs.impl.disable.cache": "true"
+        },
+        "core-site": {
+          "hadoop.proxyuser.falcon.groups": "users",
+          "hadoop.proxyuser.falcon.hosts": "*"
+        },
+        "hdfs-site": {
+          "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+          "dfs.datanode.max.transfer.threads": "4096"
+        },
+        "hbase-site": {
+          "hbase.hregion.majorcompaction.jitter": "0.50",
+          "hbase.hregion.majorcompaction": "604800000",
+          "hbase.hregion.memstore.block.multiplier": "4",
+          "hbase.hstore.flush.retries.number": {"remove": "yes"}
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/f28975c4/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.json
new file mode 100644
index 0000000..727987e
--- /dev/null
+++ b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.json
@@ -0,0 +1,289 @@
+{
+  "version": "1.0",
+  "stacks": [
+    {
+      "name": "HDP",
+      "old-version": "2.1",
+      "target-version": "2.2",
+      "options": {
+        "config-types": {
+          "core-site": {
+            "merged-copy": "yes"
+          },
+          "hdfs-site": {
+            "merged-copy": "yes"
+          },
+          "yarn-site": {
+            "merged-copy": "yes"
+          },
+          "mapred-site": {
+            "merged-copy": "yes"
+          },
+          "hbase-site": {
+            "merged-copy": "yes"
+          },
+          "hive-site": {
+            "merged-copy": "yes"
+          },
+          "oozie-site": {
+            "merged-copy": "yes"
+          },
+          "webhcat-site": {
+            "merged-copy": "yes"
+          },
+          "tez-site":{
+            "merged-copy": "yes"
+          },
+          "falcon-startup.properties": {
+            "merged-copy": "yes"
+          }
+        }
+      },
+      "properties": {
+        "falcon-startup.properties": {
+          "*.application.services": "org.apache.falcon.security.AuthenticationInitializationService, org.apache.falcon.workflow.WorkflowJobEndNotificationService, org.apache.falcon.service.ProcessSubscriberService, org.apache.falcon.entity.store.ConfigurationStore, org.apache.falcon.rerun.service.RetryService, org.apache.falcon.rerun.service.LateRunService, org.apache.falcon.service.LogCleanupService",
+          "*.dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
+          "*.falcon.enableTLS": "false",
+          "*.falcon.http.authentication.cookie.domain": "EXAMPLE.COM",
+          "*.falcon.http.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+          "*.falcon.http.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+          "*.falcon.security.authorization.admin.groups": "falcon",
+          "*.falcon.security.authorization.admin.users": "falcon,ambari-qa",
+          "*.falcon.security.authorization.enabled": "false",
+          "*.falcon.security.authorization.provider": "org.apache.falcon.security.DefaultAuthorizationProvider",
+          "*.falcon.security.authorization.superusergroup": "falcon",
+          "*.falcon.service.authentication.kerberos.keytab": "/etc/security/keytabs/falcon.service.keytab",
+          "*.falcon.service.authentication.kerberos.principal": "falcon/_HOST@EXAMPLE.COM",
+          "*.journal.impl": "org.apache.falcon.transaction.SharedFileSystemJournal",
+          "prism.application.services": "org.apache.falcon.entity.store.ConfigurationStore",
+          "prism.configstore.listeners": "org.apache.falcon.entity.v0.EntityGraph, org.apache.falcon.entity.ColoClusterRelation, org.apache.falcon.group.FeedGroupMap"
+        },
+        "tez-site":{
+          "tez.am.container.idle.release-timeout-max.millis": "20000",
+          "tez.am.container.idle.release-timeout-min.millis": "10000",
+          "tez.am.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
+          "tez.am.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC",
+          "tez.am.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
+          "tez.am.max.app.attempts": "2",
+          "tez.am.maxtaskfailures.per.node": "10",
+          "tez.cluster.additional.classpath.prefix": "/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
+          "tez.counters.max": "2000",
+          "tez.counters.max.groups": "1000",
+          "tez.generate.debug.artifacts": "false",
+          "tez.grouping.max-size": "1073741824",
+          "tez.grouping.min-size": "16777216",
+          "tez.grouping.split-waves": "1.7",
+          "tez.history.logging.service.class": "org.apache.tez.dag.history.logging.ats.ATSHistoryLoggingService",
+          "tez.runtime.compress": "true",
+          "tez.runtime.compress.codec": "org.apache.hadoop.io.compress.SnappyCodec",
+          "tez.runtime.io.sort.mb": "272",
+          "tez.runtime.unordered.output.buffer.size-mb": "51",
+          "tez.shuffle-vertex-manager.max-src-fraction": "0.4",
+          "tez.shuffle-vertex-manager.min-src-fraction": "0.2",
+          "tez.task.am.heartbeat.counter.interval-ms.max": "4000",
+          "tez.task.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
+          "tez.task.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC",
+          "tez.task.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
+          "tez.task.max-events-per-heartbeat": "500",
+          "tez.task.resource.memory.mb": "682",
+          "tez.am.container.reuse.non-local-fallback.enabled": "false",
+          "tez.am.resource.memory.mb": "1364",
+          "tez.lib.uris": "/hdp/apps/${hdp.version}/tez/tez.tar.gz",
+          "tez.session.client.timeout.secs": "-1",
+          "tez.am.container.session.delay-allocation-millis": {"remove": "yes"},
+          "tez.am.env": {"remove": "yes"},
+          "tez.am.grouping.max-size": {"remove": "yes"},
+          "tez.am.grouping.min-size": {"remove": "yes"},
+          "tez.am.grouping.split-waves": {"remove": "yes"},
+          "tez.am.java.opt": {"remove": "yes"},
+          "tez.am.shuffle-vertex-manager.max-src-fraction": {"remove": "yes"},
+          "tez.am.shuffle-vertex-manager.min-src-fraction": {"remove": "yes"},
+          "tez.runtime.intermediate-input.compress.codec": {"remove": "yes"},
+          "tez.runtime.intermediate-input.is-compressed": {"remove": "yes"},
+          "tez.runtime.intermediate-output.compress.codec": {"remove": "yes"},
+          "tez.runtime.intermediate-output.should-compress": {"remove": "yes"},
+          "tez.yarn.ats.enabled": {"remove": "yes"}
+        },
+        "webhcat-site": {
+          "templeton.storage.class": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage",
+          "templeton.port": "50111"
+        },
+        "oozie-site": {
+          "oozie.authentication.simple.anonymous.allowed": "true",
+          "oozie.service.coord.check.maximum.frequency": "false",
+          "oozie.service.HadoopAccessorService.kerberos.enabled": "false",
+          "oozie.service.SchemaService.wf.ext.schemas": "shell-action-0.1.xsd,shell-action-0.2.xsd,shell-action-0.3.xsd,email-action-0.1.xsd,email-action-0.2.xsd,hive-action-0.2.xsd,hive-action-0.3.xsd,hive-action-0.4.xsd,hive-action-0.5.xsd,sqoop-action-0.2.xsd,sqoop-action-0.3.xsd,sqoop-action-0.4.xsd,ssh-action-0.1.xsd,ssh-action-0.2.xsd,distcp-action-0.1.xsd,distcp-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd",
+          "oozie.services.ext": "org.apache.oozie.service.JMSAccessorService,org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService"
+        },
+        "hive-site": {
+          "hive.auto.convert.sortmerge.join.to.mapjoin": "false",
+          "hive.cbo.enable": "true",
+          "hive.cli.print.header": "false",
+          "hive.cluster.delegation.token.store.class": "org.apache.hadoop.hive.thrift.ZooKeeperTokenStore",
+          "hive.cluster.delegation.token.store.zookeeper.znode": "/hive/cluster/delegation",
+          "hive.conf.restricted.list": "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role",
+          "hive.convert.join.bucket.mapjoin.tez": "false",
+          "hive.exec.compress.intermediate": "false",
+          "hive.exec.compress.output": "false",
+          "hive.exec.dynamic.partition": "true",
+          "hive.exec.dynamic.partition.mode": "nonstrict",
+          "hive.exec.max.created.files": "100000",
+          "hive.exec.max.dynamic.partitions": "5000",
+          "hive.exec.max.dynamic.partitions.pernode": "2000",
+          "hive.exec.orc.compression.strategy": "SPEED",
+          "hive.exec.orc.default.compress": "ZLIB",
+          "hive.exec.orc.default.stripe.size": "67108864",
+          "hive.exec.parallel": "false",
+          "hive.exec.parallel.thread.number": "8",
+          "hive.exec.reducers.bytes.per.reducer": "67108864",
+          "hive.exec.reducers.max": "1009",
+          "hive.exec.scratchdir": "/tmp/hive",
+          "hive.exec.submit.local.task.via.child": "true",
+          "hive.exec.submitviachild": "false",
+          "hive.fetch.task.aggr": "false",
+          "hive.fetch.task.conversion": "more",
+          "hive.fetch.task.conversion.threshold": "1073741824",
+          "hive.map.aggr.hash.force.flush.memory.threshold": "0.9",
+          "hive.map.aggr.hash.min.reduction": "0.5",
+          "hive.map.aggr.hash.percentmemory": "0.5",
+          "hive.mapjoin.optimized.hashtable": "true",
+          "hive.merge.mapfiles": "true",
+          "hive.merge.mapredfiles": "false",
+          "hive.merge.orcfile.stripe.level": "true",
+          "hive.merge.rcfile.block.level": "true",
+          "hive.merge.size.per.task": "256000000",
+          "hive.merge.smallfiles.avgsize": "16000000",
+          "hive.merge.tezfiles": "false",
+          "hive.metastore.authorization.storage.checks": "false",
+          "hive.metastore.client.connect.retry.delay": "5s",
+          "hive.metastore.connect.retries": "24",
+          "hive.metastore.failure.retries": "24",
+          "hive.metastore.server.max.threads": "100000",
+          "hive.optimize.constant.propagation": "true",
+          "hive.optimize.metadataonly": "true",
+          "hive.optimize.null.scan": "true",
+          "hive.optimize.sort.dynamic.partition": "false",
+          "hive.orc.compute.splits.num.threads": "10",
+          "hive.prewarm.enabled": "false",
+          "hive.prewarm.numcontainers": "10",
+          "hive.security.metastore.authenticator.manager": "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
+          "hive.security.metastore.authorization.auth.reads": "true",
+          "hive.server2.allow.user.substitution": "true",
+          "hive.server2.logging.operation.enabled": "true",
+          "hive.server2.logging.operation.log.location": "${system:java.io.tmpdir}/${system:user.name}/operation_logs",
+          "hive.server2.table.type.mapping": "CLASSIC",
+          "hive.server2.thrift.http.path": "cliservice",
+          "hive.server2.thrift.http.port": "10001",
+          "hive.server2.thrift.max.worker.threads": "500",
+          "hive.server2.thrift.sasl.qop": "auth",
+          "hive.server2.transport.mode": "binary",
+          "hive.server2.use.SSL": "false",
+          "hive.smbjoin.cache.rows": "10000",
+          "hive.stats.dbclass": "fs",
+          "hive.stats.fetch.column.stats": "false",
+          "hive.stats.fetch.partition.stats": "true",
+          "hive.support.concurrency": "false",
+          "hive.tez.auto.reducer.parallelism": "false",
+          "hive.tez.cpu.vcores": "-1",
+          "hive.tez.dynamic.partition.pruning": "true",
+          "hive.tez.dynamic.partition.pruning.max.data.size": "104857600",
+          "hive.tez.dynamic.partition.pruning.max.event.size": "1048576",
+          "hive.tez.log.level": "INFO",
+          "hive.tez.max.partition.factor": "2.0",
+          "hive.tez.min.partition.factor": "0.25",
+          "hive.tez.smb.number.waves": "0.5",
+          "hive.user.install.directory": "/user/",
+          "hive.vectorized.execution.reduce.enabled": "false",
+          "hive.zookeeper.client.port": "2181",
+          "hive.zookeeper.namespace": "hive_zookeeper_namespace",
+          "hive.metastore.client.socket.timeout": "1800s",
+          "hive.optimize.reducededuplication.min.reducer": "4",
+          "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory",
+          "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider,org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly",
+          "hive.server2.support.dynamic.service.discovery": "true",
+          "hive.vectorized.groupby.checkinterval": "4096",
+          "fs.file.impl.disable.cache": "true",
+          "fs.hdfs.impl.disable.cache": "true"
+        },
+        "hbase-site": {
+          "hbase.hregion.majorcompaction.jitter": "0.50",
+          "hbase.hregion.majorcompaction": "604800000",
+          "hbase.hregion.memstore.block.multiplier": "4",
+          "hbase.hstore.flush.retries.number": {"remove": "yes"}
+        },
+        "mapred-site": {
+          "mapreduce.job.emit-timeline-data": "false",
+          "mapreduce.jobhistory.bind-host": "0.0.0.0",
+          "mapreduce.reduce.shuffle.fetch.retry.enabled": "1",
+          "mapreduce.reduce.shuffle.fetch.retry.interval-ms": "1000",
+          "mapreduce.reduce.shuffle.fetch.retry.timeout-ms": "30000",
+          "mapreduce.application.framework.path": "/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework",
+          "mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
+          "mapreduce.admin.reduce.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
+          "yarn.app.mapreduce.am.admin-command-opts": "-Dhdp.version=${hdp.version}",
+          "yarn.app.mapreduce.am.command-opts": "-Xmx546m -Dhdp.version=${hdp.version}",
+          "mapreduce.application.classpath": "$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
+          "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64"
+        },
+        "core-site": {
+          "hadoop.http.authentication.simple.anonymous.allowed": "true"
+        },
+        "hdfs-site": {
+          "dfs.namenode.startup.delay.block.deletion.sec": "3600",
+          "dfs.datanode.max.transfer.threads": "4096"
+        },
+        "yarn-site": {
+          "yarn.application.classpath": "$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*",
+          "hadoop.registry.rm.enabled": "false",
+          "yarn.client.nodemanager-connect.max-wait-ms": "900000",
+          "yarn.client.nodemanager-connect.retry-interval-ms": "10000",
+          "yarn.node-labels.fs-store.retry-policy-spec": "2000, 500",
+          "yarn.node-labels.fs-store.root-dir": "/system/yarn/node-labels",
+          "yarn.node-labels.manager-class": "org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager",
+          "yarn.nodemanager.bind-host": "0.0.0.0",
+          "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage": "90",
+          "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000",
+          "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "hadoop-yarn",
+          "yarn.nodemanager.linux-container-executor.cgroups.mount": "false",
+          "yarn.nodemanager.linux-container-executor.cgroups.strictresource-usage": "false",
+          "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler",
+          "yarn.nodemanager.log-aggregation.debug-enabled": "false",
+          "yarn.nodemanager.log-aggregation.num-log-files-er-app": "30",
+          "yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds": "-1",
+          "yarn.nodemanager.recovery.dir": "/var/log/hadoop-yarn/nodemanager/recovery-state",
+          "yarn.nodemanager.recovery.enabled": "false",
+          "yarn.nodemanager.resource.cpu-vcores": "1",
+          "yarn.nodemanager.resource.percentage-physical-cpu-limit": "100",
+          "yarn.resourcemanager.bind-host": "0.0.0.0",
+          "yarn.resourcemanager.connect.max-wait.ms": "900000",
+          "yarn.resourcemanager.connect.retry-interval.ms": "30000",
+          "yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500",
+          "yarn.resourcemanager.fs.state-store.uri": "",
+          "yarn.resourcemanager.ha.enabled": "false",
+          "yarn.resourcemanager.recovery.enabled": "false",
+          "yarn.resourcemanager.state-store.max-completed-applications": "${yarn.resourcemanager.max-completed-applications}",
+          "yarn.resourcemanager.store.class": "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore",
+          "yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size": "10",
+          "yarn.resourcemanager.system-metrics-publisher.enabled": "true",
+          "yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled": "false",
+          "yarn.resourcemanager.work-preserving-recovery.enabled": "false",
+          "yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms": "10000",
+          "yarn.resourcemanager.zk-acl": "world:anyone:rwcda",
+          "yarn.resourcemanager.zk-address": "localhost:2181",
+          "yarn.resourcemanager.zk-num-retries": "1000",
+          "yarn.resourcemanager.zk-retry-interval-ms": "1000",
+          "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore",
+          "yarn.resourcemanager.zk-timeout-ms": "10000",
+          "yarn.timeline-service.bind-host": "0.0.0.0",
+          "yarn.timeline-service.client.max-retries": "30",
+          "yarn.timeline-service.client.retry-interval-ms": "1000",
+          "yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true",
+          "yarn.timeline-service.http-authentication.type": "simple",
+          "yarn.timeline-service.leveldb-timeline-store.read-cache-size": "104857600",
+          "yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000",
+          "yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000"
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
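
Both new catalog files can be sanity-checked as well-formed JSON before running an upgrade, for example with the Python standard library (the path below is relative to the repository root):

import json

# Raises a ValueError if the catalog is not valid JSON.
with open("ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.json") as f:
    catalog = json.load(f)

stack = catalog["stacks"][0]
print(stack["name"], stack["old-version"], "->", stack["target-version"])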