Posted to commits@ambari.apache.org by sm...@apache.org on 2013/10/01 11:14:17 UTC

git commit: AMBARI-3395. Update config property mapping for upgrade.

Updated Branches:
  refs/heads/trunk 62b1902eb -> 66236fe78


AMBARI-3395. Update config property mapping for upgrade.


Project: http://git-wip-us.apache.org/repos/asf/incubator-ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ambari/commit/66236fe7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ambari/tree/66236fe7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ambari/diff/66236fe7

Branch: refs/heads/trunk
Commit: 66236fe786cf25248b8b1d3e0396aa262bfa3cb2
Parents: 62b1902
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Tue Oct 1 02:13:23 2013 -0700
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Tue Oct 1 02:13:30 2013 -0700

----------------------------------------------------------------------
 .../src/main/python/UpgradeHelper_HDP2.py       | 458 +++++++++++++++----
 .../src/test/python/TestUpgradeScript_HDP2.py   |  52 ++-
 2 files changed, 404 insertions(+), 106 deletions(-)
----------------------------------------------------------------------

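For readers skimming the diff: the heart of this change is PROPERTY_MAPPING, a
table of Hadoop 1 property names mapped to their Hadoop 2 equivalents, plus a
rename_all_properties() pass that applies the table to each fetched site
config before the upgrade templates run. A minimal standalone sketch of that
pass (the mapping entries and sample values are illustrative, trimmed from the
full table in the diff):

    # Trimmed illustration of the renaming pass added by this commit; see
    # rename_all_properties() in UpgradeHelper_HDP2.py below.
    PROPERTY_MAPPING = {
        "fs.default.name": "fs.defaultFS",
        "dfs.block.size": "dfs.blocksize",
    }

    def rename_all_properties(properties, name_mapping):
        # Move each old-style key to its new name, but never clobber a
        # new-style key that is already present in the config.
        for key, val in name_mapping.items():
            if key in properties and val not in properties:
                properties[val] = properties[key]
                del properties[key]
        return properties

    site = {"fs.default.name": "hdfs://nn:8020", "dfs.blocksize": "134217728"}
    print(rename_all_properties(site, PROPERTY_MAPPING))
    # -> {'dfs.blocksize': '134217728', 'fs.defaultFS': 'hdfs://nn:8020'}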

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/66236fe7/ambari-server/src/main/python/UpgradeHelper_HDP2.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/UpgradeHelper_HDP2.py b/ambari-server/src/main/python/UpgradeHelper_HDP2.py
index 8dac575..2b8b822 100644
--- a/ambari-server/src/main/python/UpgradeHelper_HDP2.py
+++ b/ambari-server/src/main/python/UpgradeHelper_HDP2.py
@@ -44,7 +44,6 @@ VALID_ACTIONS = ', '.join([GET_MR_MAPPING_ACTION, DELETE_MR_ACTION, ADD_YARN_MR2
 MR_MAPPING_FILE = "mr_mapping"
 UPGRADE_LOG_FILE = "upgrade_log"
 CAPACITY_SCHEDULER_TAG = "capacity-scheduler"
-MAPRED_QUEUE_ACLS_TAG = "mapred-queue-acls"
 MAPRED_SITE_TAG = "mapred-site"
 GLOBAL_TAG = "global"
 HDFS_SITE_TAG = "hdfs-site"
@@ -60,6 +59,267 @@ URL_FORMAT = 'http://{0}:8080/api/v1/clusters/{1}'
 
 logger = logging.getLogger()
 
+# old : new
+PROPERTY_MAPPING = {
+  "create.empty.dir.if.nonexist": "mapreduce.jobcontrol.createdir.ifnotexist",
+  "dfs.access.time.precision": "dfs.namenode.accesstime.precision",
+  "dfs.backup.address": "dfs.namenode.backup.address",
+  "dfs.backup.http.address": "dfs.namenode.backup.http-address",
+  "dfs.balance.bandwidthPerSec": "dfs.datanode.balance.bandwidthPerSec",
+  "dfs.block.size": "dfs.blocksize",
+  "dfs.data.dir": "dfs.datanode.data.dir",
+  "dfs.datanode.max.xcievers": "dfs.datanode.max.transfer.threads",
+  "dfs.df.interval": "fs.df.interval",
+  "dfs.federation.nameservice.id": "dfs.nameservice.id",
+  "dfs.federation.nameservices": "dfs.nameservices",
+  "dfs.http.address": "dfs.namenode.http-address",
+  "dfs.https.address": "dfs.namenode.https-address",
+  "dfs.https.client.keystore.resource": "dfs.client.https.keystore.resource",
+  "dfs.https.need.client.auth": "dfs.client.https.need-auth",
+  "dfs.max.objects": "dfs.namenode.max.objects",
+  "dfs.max-repl-streams": "dfs.namenode.replication.max-streams",
+  "dfs.name.dir": "dfs.namenode.name.dir",
+  "dfs.name.dir.restore": "dfs.namenode.name.dir.restore",
+  "dfs.name.edits.dir": "dfs.namenode.edits.dir",
+  "dfs.permissions": "dfs.permissions.enabled",
+  "dfs.permissions.supergroup": "dfs.permissions.superusergroup",
+  "dfs.read.prefetch.size": "dfs.client.read.prefetch.size",
+  "dfs.replication.considerLoad": "dfs.namenode.replication.considerLoad",
+  "dfs.replication.interval": "dfs.namenode.replication.interval",
+  "dfs.replication.min": "dfs.namenode.replication.min",
+  "dfs.replication.pending.timeout.sec": "dfs.namenode.replication.pending.timeout-sec",
+  "dfs.safemode.extension": "dfs.namenode.safemode.extension",
+  "dfs.safemode.threshold.pct": "dfs.namenode.safemode.threshold-pct",
+  "dfs.secondary.http.address": "dfs.namenode.secondary.http-address",
+  "dfs.socket.timeout": "dfs.client.socket-timeout",
+  "dfs.umaskmode": "fs.permissions.umask-mode",
+  "dfs.write.packet.size": "dfs.client-write-packet-size",
+  "fs.checkpoint.dir": "dfs.namenode.checkpoint.dir",
+  "fs.checkpoint.edits.dir": "dfs.namenode.checkpoint.edits.dir",
+  "fs.checkpoint.period": "dfs.namenode.checkpoint.period",
+  "fs.default.name": "fs.defaultFS",
+  "hadoop.configured.node.mapping": "net.topology.configured.node.mapping",
+  "hadoop.job.history.location": "mapreduce.jobtracker.jobhistory.location",
+  "hadoop.native.lib": "io.native.lib.available",
+  "hadoop.net.static.resolutions": "mapreduce.tasktracker.net.static.resolutions",
+  "hadoop.pipes.command-file.keep": "mapreduce.pipes.commandfile.preserve",
+  "hadoop.pipes.executable.interpretor": "mapreduce.pipes.executable.interpretor",
+  "hadoop.pipes.executable": "mapreduce.pipes.executable",
+  "hadoop.pipes.java.mapper": "mapreduce.pipes.isjavamapper",
+  "hadoop.pipes.java.recordreader": "mapreduce.pipes.isjavarecordreader",
+  "hadoop.pipes.java.recordwriter": "mapreduce.pipes.isjavarecordwriter",
+  "hadoop.pipes.java.reducer": "mapreduce.pipes.isjavareducer",
+  "hadoop.pipes.partitioner": "mapreduce.pipes.partitioner",
+  "heartbeat.recheck.interval": "dfs.namenode.heartbeat.recheck-interval",
+  "io.bytes.per.checksum": "dfs.bytes-per-checksum",
+  "io.sort.factor": "mapreduce.task.io.sort.factor",
+  "io.sort.mb": "mapreduce.task.io.sort.mb",
+  "io.sort.spill.percent": "mapreduce.map.sort.spill.percent",
+  "jobclient.completion.poll.interval": "mapreduce.client.completion.pollinterval",
+  "jobclient.output.filter": "mapreduce.client.output.filter",
+  "jobclient.progress.monitor.poll.interval": "mapreduce.client.progressmonitor.pollinterval",
+  "job.end.notification.url": "mapreduce.job.end-notification.url",
+  "job.end.retry.attempts": "mapreduce.job.end-notification.retry.attempts",
+  "job.end.retry.interval": "mapreduce.job.end-notification.retry.interval",
+  "job.local.dir": "mapreduce.job.local.dir",
+  "keep.failed.task.files": "mapreduce.task.files.preserve.failedtasks",
+  "keep.task.files.pattern": "mapreduce.task.files.preserve.filepattern",
+  "key.value.separator.in.input.line": "mapreduce.input.keyvaluelinerecordreader.key.value.separator",
+  "local.cache.size": "mapreduce.tasktracker.cache.local.size",
+  "map.input.file": "mapreduce.map.input.file",
+  "map.input.length": "mapreduce.map.input.length",
+  "map.input.start": "mapreduce.map.input.start",
+  "map.output.key.field.separator": "mapreduce.map.output.key.field.separator",
+  "map.output.key.value.fields.spec": "mapreduce.fieldsel.map.output.key.value.fields.spec",
+  "mapred.acls.enabled": "mapreduce.cluster.acls.enabled",
+  "mapred.binary.partitioner.left.offset": "mapreduce.partition.binarypartitioner.left.offset",
+  "mapred.binary.partitioner.right.offset": "mapreduce.partition.binarypartitioner.right.offset",
+  "mapred.cache.archives": "mapreduce.job.cache.archives",
+  "mapred.cache.archives.timestamps": "mapreduce.job.cache.archives.timestamps",
+  "mapred.cache.files": "mapreduce.job.cache.files",
+  "mapred.cache.files.timestamps": "mapreduce.job.cache.files.timestamps",
+  "mapred.cache.localArchives": "mapreduce.job.cache.local.archives",
+  "mapred.cache.localFiles": "mapreduce.job.cache.local.files",
+  "mapred.child.tmp": "mapreduce.task.tmp.dir",
+  "mapred.cluster.average.blacklist.threshold": "mapreduce.jobtracker.blacklist.average.threshold",
+  "mapred.cluster.map.memory.mb": "mapreduce.cluster.mapmemory.mb",
+  "mapred.cluster.max.map.memory.mb": "mapreduce.jobtracker.maxmapmemory.mb",
+  "mapred.cluster.max.reduce.memory.mb": "mapreduce.jobtracker.maxreducememory.mb",
+  "mapred.cluster.reduce.memory.mb": "mapreduce.cluster.reducememory.mb",
+  "mapred.committer.job.setup.cleanup.needed": "mapreduce.job.committer.setup.cleanup.needed",
+  "mapred.compress.map.output": "mapreduce.map.output.compress",
+  "mapred.data.field.separator": "mapreduce.fieldsel.data.field.separator",
+  "mapred.debug.out.lines": "mapreduce.task.debugout.lines",
+  "mapred.healthChecker.interval": "mapreduce.tasktracker.healthchecker.interval",
+  "mapred.healthChecker.script.args": "mapreduce.tasktracker.healthchecker.script.args",
+  "mapred.healthChecker.script.path": "mapreduce.tasktracker.healthchecker.script.path",
+  "mapred.healthChecker.script.timeout": "mapreduce.tasktracker.healthchecker.script.timeout",
+  "mapred.heartbeats.in.second": "mapreduce.jobtracker.heartbeats.in.second",
+  "mapred.hosts.exclude": "mapreduce.jobtracker.hosts.exclude.filename",
+  "mapred.hosts": "mapreduce.jobtracker.hosts.filename",
+  "mapred.inmem.merge.threshold": "mapreduce.reduce.merge.inmem.threshold",
+  "mapred.input.dir.formats": "mapreduce.input.multipleinputs.dir.formats",
+  "mapred.input.dir.mappers": "mapreduce.input.multipleinputs.dir.mappers",
+  "mapred.input.dir": "mapreduce.input.fileinputformat.inputdir",
+  "mapred.input.pathFilter.class": "mapreduce.input.pathFilter.class",
+  "mapred.jar": "mapreduce.job.jar",
+  "mapred.job.classpath.archives": "mapreduce.job.classpath.archives",
+  "mapred.job.classpath.files": "mapreduce.job.classpath.files",
+  "mapred.job.id": "mapreduce.job.id",
+  "mapred.jobinit.threads": "mapreduce.jobtracker.jobinit.threads",
+  "mapred.job.map.memory.mb": "mapreduce.map.memory.mb",
+  "mapred.job.name": "mapreduce.job.name",
+  "mapred.job.priority": "mapreduce.job.priority",
+  "mapred.job.queue.name": "mapreduce.job.queuename",
+  "mapred.job.reduce.input.buffer.percent": "mapreduce.reduce.input.buffer.percent",
+  "mapred.job.reduce.markreset.buffer.percent": "mapreduce.reduce.markreset.buffer.percent",
+  "mapred.job.reduce.memory.mb": "mapreduce.reduce.memory.mb",
+  "mapred.job.reduce.total.mem.bytes": "mapreduce.reduce.memory.totalbytes",
+  "mapred.job.reuse.jvm.num.tasks": "mapreduce.job.jvm.numtasks",
+  "mapred.job.shuffle.input.buffer.percent": "mapreduce.reduce.shuffle.input.buffer.percent",
+  "mapred.job.shuffle.merge.percent": "mapreduce.reduce.shuffle.merge.percent",
+  "mapred.job.tracker.handler.count": "mapreduce.jobtracker.handler.count",
+  "mapred.job.tracker.history.completed.location": "mapreduce.jobtracker.jobhistory.completed.location",
+  "mapred.job.tracker.http.address": "mapreduce.jobtracker.http.address",
+  "mapred.jobtracker.instrumentation": "mapreduce.jobtracker.instrumentation",
+  "mapred.jobtracker.job.history.block.size": "mapreduce.jobtracker.jobhistory.block.size",
+  "mapred.job.tracker.jobhistory.lru.cache.size": "mapreduce.jobtracker.jobhistory.lru.cache.size",
+  "mapred.job.tracker": "mapreduce.jobtracker.address",
+  "mapred.jobtracker.maxtasks.per.job": "mapreduce.jobtracker.maxtasks.perjob",
+  "mapred.job.tracker.persist.jobstatus.active": "mapreduce.jobtracker.persist.jobstatus.active",
+  "mapred.job.tracker.persist.jobstatus.dir": "mapreduce.jobtracker.persist.jobstatus.dir",
+  "mapred.job.tracker.persist.jobstatus.hours": "mapreduce.jobtracker.persist.jobstatus.hours",
+  "mapred.jobtracker.restart.recover": "mapreduce.jobtracker.restart.recover",
+  "mapred.job.tracker.retiredjobs.cache.size": "mapreduce.jobtracker.retiredjobs.cache.size",
+  "mapred.job.tracker.retire.jobs": "mapreduce.jobtracker.retirejobs",
+  "mapred.jobtracker.taskalloc.capacitypad": "mapreduce.jobtracker.taskscheduler.taskalloc.capacitypad",
+  "mapred.jobtracker.taskScheduler": "mapreduce.jobtracker.taskscheduler",
+  "mapred.jobtracker.taskScheduler.maxRunningTasksPerJob": "mapreduce.jobtracker.taskscheduler.maxrunningtasks.perjob",
+  "mapred.join.expr": "mapreduce.join.expr",
+  "mapred.join.keycomparator": "mapreduce.join.keycomparator",
+  "mapred.lazy.output.format": "mapreduce.output.lazyoutputformat.outputformat",
+  "mapred.line.input.format.linespermap": "mapreduce.input.lineinputformat.linespermap",
+  "mapred.linerecordreader.maxlength": "mapreduce.input.linerecordreader.line.maxlength",
+  "mapred.local.dir": "mapreduce.cluster.local.dir",
+  "mapred.local.dir.minspacekill": "mapreduce.tasktracker.local.dir.minspacekill",
+  "mapred.local.dir.minspacestart": "mapreduce.tasktracker.local.dir.minspacestart",
+  "mapred.map.child.env": "mapreduce.map.env",
+  "mapred.map.child.java.opts": "mapreduce.map.java.opts",
+  "mapred.map.child.log.level": "mapreduce.map.log.level",
+  "mapred.map.max.attempts": "mapreduce.map.maxattempts",
+  "mapred.map.output.compression.codec": "mapreduce.map.output.compress.codec",
+  "mapred.mapoutput.key.class": "mapreduce.map.output.key.class",
+  "mapred.mapoutput.value.class": "mapreduce.map.output.value.class",
+  "mapred.mapper.regex.group": "mapreduce.mapper.regexmapper..group",
+  "mapred.mapper.regex": "mapreduce.mapper.regex",
+  "mapred.map.task.debug.script": "mapreduce.map.debug.script",
+  "mapred.map.tasks": "mapreduce.job.maps",
+  "mapred.map.tasks.speculative.execution": "mapreduce.map.speculative",
+  "mapred.max.map.failures.percent": "mapreduce.map.failures.maxpercent",
+  "mapred.max.reduce.failures.percent": "mapreduce.reduce.failures.maxpercent",
+  "mapred.max.split.size": "mapreduce.input.fileinputformat.split.maxsize",
+  "mapred.max.tracker.blacklists": "mapreduce.jobtracker.tasktracker.maxblacklists",
+  "mapred.max.tracker.failures": "mapreduce.job.maxtaskfailures.per.tracker",
+  "mapred.merge.recordsBeforeProgress": "mapreduce.task.merge.progress.records",
+  "mapred.min.split.size": "mapreduce.input.fileinputformat.split.minsize",
+  "mapred.min.split.size.per.node": "mapreduce.input.fileinputformat.split.minsize.per.node",
+  "mapred.min.split.size.per.rack": "mapreduce.input.fileinputformat.split.minsize.per.rack",
+  "mapred.output.compression.codec": "mapreduce.output.fileoutputformat.compress.codec",
+  "mapred.output.compression.type": "mapreduce.output.fileoutputformat.compress.type",
+  "mapred.output.compress": "mapreduce.output.fileoutputformat.compress",
+  "mapred.output.dir": "mapreduce.output.fileoutputformat.outputdir",
+  "mapred.output.key.class": "mapreduce.job.output.key.class",
+  "mapred.output.key.comparator.class": "mapreduce.job.output.key.comparator.class",
+  "mapred.output.value.class": "mapreduce.job.output.value.class",
+  "mapred.output.value.groupfn.class": "mapreduce.job.output.group.comparator.class",
+  "mapred.permissions.supergroup": "mapreduce.cluster.permissions.supergroup",
+  "mapred.pipes.user.inputformat": "mapreduce.pipes.inputformat",
+  "mapred.reduce.child.env": "mapreduce.reduce.env",
+  "mapred.reduce.child.java.opts": "mapreduce.reduce.java.opts",
+  "mapred.reduce.child.log.level": "mapreduce.reduce.log.level",
+  "mapred.reduce.max.attempts": "mapreduce.reduce.maxattempts",
+  "mapred.reduce.parallel.copies": "mapreduce.reduce.shuffle.parallelcopies",
+  "mapred.reduce.slowstart.completed.maps": "mapreduce.job.reduce.slowstart.completedmaps",
+  "mapred.reduce.task.debug.script": "mapreduce.reduce.debug.script",
+  "mapred.reduce.tasks": "mapreduce.job.reduces",
+  "mapred.reduce.tasks.speculative.execution": "mapreduce.reduce.speculative",
+  "mapred.seqbinary.output.key.class": "mapreduce.output.seqbinaryoutputformat.key.class",
+  "mapred.seqbinary.output.value.class": "mapreduce.output.seqbinaryoutputformat.value.class",
+  "mapred.shuffle.connect.timeout": "mapreduce.reduce.shuffle.connect.timeout",
+  "mapred.shuffle.read.timeout": "mapreduce.reduce.shuffle.read.timeout",
+  "mapred.skip.attempts.to.start.skipping": "mapreduce.task.skip.start.attempts",
+  "mapred.skip.map.auto.incr.proc.count": "mapreduce.map.skip.proc-count.auto-incr",
+  "mapred.skip.map.max.skip.records": "mapreduce.map.skip.maxrecords",
+  "mapred.skip.on": "mapreduce.job.skiprecords",
+  "mapred.skip.out.dir": "mapreduce.job.skip.outdir",
+  "mapred.skip.reduce.auto.incr.proc.count": "mapreduce.reduce.skip.proc-count.auto-incr",
+  "mapred.skip.reduce.max.skip.groups": "mapreduce.reduce.skip.maxgroups",
+  "mapred.speculative.execution.slowNodeThreshold": "mapreduce.job.speculative.slownodethreshold",
+  "mapred.speculative.execution.slowTaskThreshold": "mapreduce.job.speculative.slowtaskthreshold",
+  "mapred.speculative.execution.speculativeCap": "mapreduce.job.speculative.speculativecap",
+  "mapred.submit.replication": "mapreduce.client.submit.file.replication",
+  "mapred.system.dir": "mapreduce.jobtracker.system.dir",
+  "mapred.task.cache.levels": "mapreduce.jobtracker.taskcache.levels",
+  "mapred.task.id": "mapreduce.task.attempt.id",
+  "mapred.task.is.map": "mapreduce.task.ismap",
+  "mapred.task.partition": "mapreduce.task.partition",
+  "mapred.task.profile": "mapreduce.task.profile",
+  "mapred.task.profile.maps": "mapreduce.task.profile.maps",
+  "mapred.task.profile.params": "mapreduce.task.profile.params",
+  "mapred.task.profile.reduces": "mapreduce.task.profile.reduces",
+  "mapred.task.timeout": "mapreduce.task.timeout",
+  "mapred.tasktracker.dns.interface": "mapreduce.tasktracker.dns.interface",
+  "mapred.tasktracker.dns.nameserver": "mapreduce.tasktracker.dns.nameserver",
+  "mapred.tasktracker.events.batchsize": "mapreduce.tasktracker.events.batchsize",
+  "mapred.tasktracker.expiry.interval": "mapreduce.jobtracker.expire.trackers.interval",
+  "mapred.task.tracker.http.address": "mapreduce.tasktracker.http.address",
+  "mapred.tasktracker.indexcache.mb": "mapreduce.tasktracker.indexcache.mb",
+  "mapred.tasktracker.instrumentation": "mapreduce.tasktracker.instrumentation",
+  "mapred.tasktracker.map.tasks.maximum": "mapreduce.tasktracker.map.tasks.maximum",
+  "mapred.tasktracker.memory_calculator_plugin": "mapreduce.tasktracker.resourcecalculatorplugin",
+  "mapred.tasktracker.memorycalculatorplugin": "mapreduce.tasktracker.resourcecalculatorplugin",
+  "mapred.tasktracker.reduce.tasks.maximum": "mapreduce.tasktracker.reduce.tasks.maximum",
+  "mapred.task.tracker.report.address": "mapreduce.tasktracker.report.address",
+  "mapred.task.tracker.task-controller": "mapreduce.tasktracker.taskcontroller",
+  "mapred.tasktracker.taskmemorymanager.monitoring-interval": "mapreduce.tasktracker.taskmemorymanager.monitoringinterval",
+  "mapred.tasktracker.tasks.sleeptime-before-sigkill": "mapreduce.tasktracker.tasks.sleeptimebeforesigkill",
+  "mapred.temp.dir": "mapreduce.cluster.temp.dir",
+  "mapred.text.key.comparator.options": "mapreduce.partition.keycomparator.options",
+  "mapred.text.key.partitioner.options": "mapreduce.partition.keypartitioner.options",
+  "mapred.textoutputformat.separator": "mapreduce.output.textoutputformat.separator",
+  "mapred.tip.id": "mapreduce.task.id",
+  "mapreduce.combine.class": "mapreduce.job.combine.class",
+  "mapreduce.inputformat.class": "mapreduce.job.inputformat.class",
+  "mapreduce.job.counters.limit": "mapreduce.job.counters.max",
+  "mapreduce.jobtracker.permissions.supergroup": "mapreduce.cluster.permissions.supergroup",
+  "mapreduce.map.class": "mapreduce.job.map.class",
+  "mapreduce.outputformat.class": "mapreduce.job.outputformat.class",
+  "mapreduce.partitioner.class": "mapreduce.job.partitioner.class",
+  "mapreduce.reduce.class": "mapreduce.job.reduce.class",
+  "mapred.used.genericoptionsparser": "mapreduce.client.genericoptionsparser.used",
+  "mapred.userlog.limit.kb": "mapreduce.task.userlog.limit.kb",
+  "mapred.userlog.retain.hours": "mapreduce.job.userlog.retain.hours",
+  "mapred.working.dir": "mapreduce.job.working.dir",
+  "mapred.work.output.dir": "mapreduce.task.output.dir",
+  "min.num.spills.for.combine": "mapreduce.map.combine.minspills",
+  "reduce.output.key.value.fields.spec": "mapreduce.fieldsel.reduce.output.key.value.fields.spec",
+  "security.job.submission.protocol.acl": "security.job.client.protocol.acl",
+  "security.task.umbilical.protocol.acl": "security.job.task.protocol.acl",
+  "sequencefile.filter.class": "mapreduce.input.sequencefileinputfilter.class",
+  "sequencefile.filter.frequency": "mapreduce.input.sequencefileinputfilter.frequency",
+  "sequencefile.filter.regex": "mapreduce.input.sequencefileinputfilter.regex",
+  "session.id": "dfs.metrics.session-id",
+  "slave.host.name": "dfs.datanode.hostname",
+  "slave.host.name": "mapreduce.tasktracker.host.name",
+  "tasktracker.contention.tracking": "mapreduce.tasktracker.contention.tracking",
+  "tasktracker.http.threads": "mapreduce.tasktracker.http.threads",
+  "topology.node.switch.mapping.impl": "net.topology.node.switch.mapping.impl",
+  "topology.script.file.name": "net.topology.script.file.name",
+  "topology.script.number.args": "net.topology.script.number.args",
+  "user.name": "mapreduce.job.user.name",
+  "webinterface.private.actions": "mapreduce.jobtracker.webinterface.trusted"
+}
+
 CAPACITY_SCHEDULER = {
   "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2",
   "yarn.scheduler.capacity.maximum-applications": "10000",
@@ -72,64 +332,22 @@ CAPACITY_SCHEDULER = {
   "yarn.scheduler.capacity.root.default.state": "RUNNING",
   "yarn.scheduler.capacity.root.default.user-limit-factor": "1",
   "yarn.scheduler.capacity.root.queues": "default",
-  "yarn.scheduler.capacity.root.unfunded.capacity": "50"}
-
-MAPRED_QUEUE_ACLS = {
-  "mapred.queue.default.acl-administer-jobs": "*", "mapred.queue.default.acl-submit-job": "*"}
+  "yarn.scheduler.capacity.root.unfunded.capacity": "50"
+}
 
 MAPRED_SITE = {
   "hadoop.job.history.user.location": "DELETE_OLD",
-  "io.sort.factor": "DELETE_OLD",
-  "io.sort.mb": "DELETE_OLD",
   "io.sort.record.percent": "DELETE_OLD",
-  "io.sort.spill.percent": "DELETE_OLD",
   "jetty.connector": "DELETE_OLD",
   "mapred.child.java.opts": "DELETE_OLD",
   "mapred.child.root.logger": "DELETE_OLD",
-  "mapred.cluster.map.memory.mb": "DELETE_OLD",
-  "mapred.cluster.max.map.memory.mb": "DELETE_OLD",
-  "mapred.cluster.max.reduce.memory.mb": "DELETE_OLD",
-  "mapred.cluster.reduce.memory.mb": "DELETE_OLD",
-  "mapred.healthChecker.interval": "DELETE_OLD",
-  "mapred.healthChecker.script.path": "DELETE_OLD",
-  "mapred.healthChecker.script.timeout": "DELETE_OLD",
-  "mapred.inmem.merge.threshold": "DELETE_OLD",
-  "mapred.job.map.memory.mb": "DELETE_OLD",
-  "mapred.job.reduce.input.buffer.percent": "DELETE_OLD",
-  "mapred.job.reduce.memory.mb": "DELETE_OLD",
-  "mapred.job.reuse.jvm.num.tasks": "DELETE_OLD",
-  "mapred.job.shuffle.input.buffer.percent": "DELETE_OLD",
-  "mapred.job.shuffle.merge.percent": "DELETE_OLD",
-  "mapred.job.tracker": "DELETE_OLD",
-  "mapred.job.tracker.handler.count": "DELETE_OLD",
-  "mapred.job.tracker.history.completed.location": "DELETE_OLD",
-  "mapred.job.tracker.http.address": "DELETE_OLD",
-  "mapred.job.tracker.persist.jobstatus.active": "DELETE_OLD",
-  "mapred.job.tracker.persist.jobstatus.dir": "DELETE_OLD",
-  "mapred.job.tracker.persist.jobstatus.hours": "DELETE_OLD",
   "mapred.jobtracker.blacklist.fault-bucket-width": "DELETE_OLD",
   "mapred.jobtracker.blacklist.fault-timeout-window": "DELETE_OLD",
   "mapred.jobtracker.completeuserjobs.maximum": "DELETE_OLD",
-  "mapred.jobtracker.maxtasks.per.job": "DELETE_OLD",
-  "mapred.jobtracker.restart.recover": "DELETE_OLD",
   "mapred.jobtracker.retirejob.check": "DELETE_OLD",
   "mapred.jobtracker.retirejob.interval": "DELETE_OLD",
-  "mapred.jobtracker.taskScheduler": "DELETE_OLD",
-  "mapred.local.dir": "DELETE_OLD",
-  "mapred.map.output.compression.codec": "DELETE_OLD",
-  "mapred.map.tasks.speculative.execution": "DELETE_OLD",
-  "mapred.max.tracker.blacklists": "DELETE_OLD",
-  "mapred.output.compression.type": "DELETE_OLD",
   "mapred.queue.names": "DELETE_OLD",
-  "mapred.reduce.parallel.copies": "DELETE_OLD",
-  "mapred.reduce.slowstart.completed.maps": "DELETE_OLD",
-  "mapred.reduce.tasks.speculative.execution": "DELETE_OLD",
-  "mapred.system.dir": "DELETE_OLD",
-  "mapred.task.timeout": "DELETE_OLD",
-  "mapred.tasktracker.map.tasks.maximum": "DELETE_OLD",
-  "mapred.tasktracker.reduce.tasks.maximum": "DELETE_OLD",
-  "mapred.tasktracker.tasks.sleeptime-before-sigkill": "DELETE_OLD",
-  "mapred.userlog.retain.hours": "DELETE_OLD",
+  "mapreduce.job.userlog.retain.hours": "DELETE_OLD",
   "mapreduce.admin.map.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN",
   "mapreduce.admin.reduce.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN",
   "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`",
@@ -139,36 +357,73 @@ MAPRED_SITE = {
   "mapreduce.framework.name": "yarn",
   "mapreduce.history.server.embedded": "DELETE_OLD",
   "mapreduce.history.server.http.address": "DELETE_OLD",
-  "mapreduce.job.reduce.slowstart.completedmaps": "REPLACE_WITH_mapred.reduce.slowstart.completed.maps",
   "mapreduce.jobhistory.address": "REPLACE_JH_HOST:10020",
-  "mapreduce.jobhistory.done-dir": "REPLACE_WITH_mapred.job.tracker.history.completed.location",
+  "mapreduce.jobhistory.done-dir": "REPLACE_WITH_mapreduce.jobtracker.jobhistory.completed.location",
   "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp",
   "mapreduce.jobhistory.webapp.address": "REPLACE_JH_HOST:19888",
+  "mapreduce.jobtracker.address": "DELETE_OLD",
+  "mapreduce.jobtracker.blacklist.average.threshold": "DELETE_OLD",
+  "mapreduce.jobtracker.expire.trackers.interval": "DELETE_OLD",
+  "mapreduce.jobtracker.handler.count": "DELETE_OLD",
+  "mapreduce.jobtracker.heartbeats.in.second": "DELETE_OLD",
+  "mapreduce.jobtracker.hosts.exclude.filename": "DELETE_OLD",
+  "mapreduce.jobtracker.hosts.filename": "DELETE_OLD",
+  "mapreduce.jobtracker.http.address": "DELETE_OLD",
+  "mapreduce.jobtracker.instrumentation": "DELETE_OLD",
+  "mapreduce.jobtracker.jobhistory.block.size": "DELETE_OLD",
+  "mapreduce.jobtracker.jobhistory.location": "DELETE_OLD",
+  "mapreduce.jobtracker.jobhistory.lru.cache.size": "DELETE_OLD",
+  "mapreduce.jobtracker.maxmapmemory.mb": "DELETE_OLD",
+  "mapreduce.jobtracker.maxreducememory.mb": "DELETE_OLD",
+  "mapreduce.jobtracker.maxtasks.perjob": "DELETE_OLD",
+  "mapreduce.jobtracker.persist.jobstatus.active": "DELETE_OLD",
+  "mapreduce.jobtracker.persist.jobstatus.dir": "DELETE_OLD",
+  "mapreduce.jobtracker.persist.jobstatus.hours": "DELETE_OLD",
+  "mapreduce.jobtracker.restart.recover": "DELETE_OLD",
+  "mapreduce.jobtracker.retiredjobs.cache.size": "DELETE_OLD",
+  "mapreduce.jobtracker.retirejobs": "DELETE_OLD",
   "mapreduce.jobtracker.split.metainfo.maxsize": "DELETE_OLD",
   "mapreduce.jobtracker.staging.root.dir": "DELETE_OLD",
-  "mapreduce.jobtracker.system.dir": "REPLACE_WITH_mapred.system.dir",
+  "mapreduce.jobtracker.taskcache.levels": "DELETE_OLD",
+  "mapreduce.jobtracker.taskscheduler": "DELETE_OLD",
+  "mapreduce.jobtracker.taskscheduler.maxrunningtasks.perjob": "DELETE_OLD",
+  "mapreduce.jobtracker.taskscheduler.taskalloc.capacitypad": "DELETE_OLD",
+  "mapreduce.jobtracker.tasktracker.maxblacklists": "DELETE_OLD",
+  "mapreduce.jobtracker.webinterface.trusted": "DELETE_OLD",
   "mapreduce.map.java.opts": "-Xmx320m",
   "mapreduce.map.log.level": "INFO",
-  "mapreduce.map.memory.mb": "REPLACE_WITH_mapred.job.map.memory.mb",
   "mapreduce.map.sort.spill.percent": "0.1",
-  "mapreduce.map.speculative": "REPLACE_WITH_mapred.map.tasks.speculative.execution",
-  "mapreduce.output.fileoutputformat.compress.type": "REPLACE_WITH_mapred.output.compression.type",
-  "mapreduce.reduce.input.buffer.percent": "REPLACE_WITH_mapred.job.reduce.input.buffer.percent",
   "mapreduce.reduce.input.limit": "DELETE_OLD",
   "mapreduce.reduce.java.opts": "-Xmx756m",
   "mapreduce.reduce.log.level": "INFO",
-  "mapreduce.reduce.memory.mb": "REPLACE_WITH_mapred.job.reduce.memory.mb",
-  "mapreduce.reduce.shuffle.input.buffer.percent": "REPLACE_WITH_mapred.job.shuffle.input.buffer.percent",
-  "mapreduce.reduce.shuffle.merge.percent": "REPLACE_WITH_mapred.job.shuffle.merge.percent",
-  "mapreduce.reduce.shuffle.parallelcopies": "REPLACE_WITH_mapred.reduce.parallel.copies",
-  "mapreduce.reduce.speculative": "REPLACE_WITH_mapred.reduce.tasks.speculative.execution",
+  "mapreduce.reduce.merge.inmem.threshold": "DELETE_OLD",
   "mapreduce.shuffle.port": "13562",
-  "mapreduce.task.io.sort.factor": "REPLACE_WITH_io.sort.factor",
-  "mapreduce.task.io.sort.mb": "REPLACE_WITH_io.sort.mb",
-  "mapreduce.task.timeout": "REPLACE_WITH_mapred.task.timeout",
+  "mapreduce.tasktracker.cache.local.size": "DELETE_OLD",
+  "mapreduce.tasktracker.contention.tracking": "DELETE_OLD",
+  "mapreduce.tasktracker.dns.interface": "DELETE_OLD",
+  "mapreduce.tasktracker.dns.nameserver": "DELETE_OLD",
+  "mapreduce.tasktracker.events.batchsize": "DELETE_OLD",
   "mapreduce.tasktracker.group": "DELETE_OLD",
-  "mapreduce.tasktracker.healthchecker.script.path": "REPLACE_WITH_mapred.healthChecker.script.path",
-  "tasktracker.http.threads": "DELETE_OLD",
+  "mapreduce.tasktracker.healthchecker.interval": "DELETE_OLD",
+  "mapreduce.tasktracker.healthchecker.script.args": "DELETE_OLD",
+  "mapreduce.tasktracker.healthchecker.script.path": "DELETE_OLD",
+  "mapreduce.tasktracker.healthchecker.script.timeout": "DELETE_OLD",
+  "mapreduce.tasktracker.host.name": "DELETE_OLD",
+  "mapreduce.tasktracker.http.address": "DELETE_OLD",
+  "mapreduce.tasktracker.http.threads": "DELETE_OLD",
+  "mapreduce.tasktracker.indexcache.mb": "DELETE_OLD",
+  "mapreduce.tasktracker.instrumentation": "DELETE_OLD",
+  "mapreduce.tasktracker.local.dir.minspacekill": "DELETE_OLD",
+  "mapreduce.tasktracker.local.dir.minspacestart": "DELETE_OLD",
+  "mapreduce.tasktracker.map.tasks.maximum": "DELETE_OLD",
+  "mapreduce.tasktracker.net.static.resolutions": "DELETE_OLD",
+  "mapreduce.tasktracker.reduce.tasks.maximum": "DELETE_OLD",
+  "mapreduce.tasktracker.report.address": "DELETE_OLD",
+  "mapreduce.tasktracker.resourcecalculatorplugin": "DELETE_OLD",
+  "mapreduce.tasktracker.resourcecalculatorplugin": "DELETE_OLD",
+  "mapreduce.tasktracker.taskcontroller": "DELETE_OLD",
+  "mapreduce.tasktracker.taskmemorymanager.monitoringinterval": "DELETE_OLD",
+  "mapreduce.tasktracker.tasks.sleeptimebeforesigkill": "DELETE_OLD",
   "yarn.app.mapreduce.am.admin-command-opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN",
   "yarn.app.mapreduce.am.command-opts": "-Xmx756m",
   "yarn.app.mapreduce.am.log.level": "INFO",
@@ -186,10 +441,11 @@ GLOBAL = {
   "dfs_namenode_name_dir": "REPLACE_WITH_dfs_name_dir",
   "fs_checkpoint_size": "0.5",
   "io_sort_spill_percent": "DELETE_OLD",
+  "hadoop_conf_dir" : "/etc/hadoop/conf",
+  "hfile_blockcache_size" : "0.25",
   "jtnode_heapsize": "DELETE_OLD",
   "jtnode_opt_maxnewsize": "DELETE_OLD",
   "jtnode_opt_newsize": "DELETE_OLD",
-  "lzo_enabled": "DELETE_OLD",
   "mapred_child_java_opts_sz": "DELETE_OLD",
   "mapred_cluster_map_mem_mb": "DELETE_OLD",
   "mapred_cluster_max_map_mem_mb": "DELETE_OLD",
@@ -207,6 +463,8 @@ GLOBAL = {
   "mapreduce_reduce_memory_mb": "REPLACE_WITH_mapred_job_red_mem_mb",
   "mapreduce_task_io_sort_mb": "REPLACE_WITH_io_sort_mb",
   "maxtasks_per_job": "DELETE_OLD",
+  "mapreduce_userlog_retainhours": "DELETE_OLD",
+  "namenode_opt_maxnewsize" : "640m",
   "nodemanager_heapsize": "1024",
   "rca_enabled": "DELETE_OLD",
   "resourcemanager_heapsize": "1024",
@@ -221,25 +479,13 @@ GLOBAL = {
 }
 
 HDFS_SITE = {
-  "dfs.blocksize": "REPLACE_WITH_dfs.block.size",
   "dfs.client.read.shortcircuit": "true",
   "dfs.client.read.shortcircuit.streams.cache.size": "4096",
-  "dfs.datanode.balance.bandwidthPerSec": "REPLACE_WITH_dfs.balance.bandwidthPerSec",
-  "dfs.datanode.data.dir": "REPLACE_WITH_dfs.data.dir",
   "dfs.datanode.du.pct": "DELETE_OLD",
-  "dfs.datanode.max.transfer.threads": "REPLACE_WITH_dfs.datanode.max.xcievers",
   "dfs.datanode.socket.write.timeout": "DELETE_OLD",
   "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
   "dfs.hosts": "DELETE_OLD",
-  "dfs.journalnode.http-address" : "0.0.0.0:8480",
-  "dfs.namenode.https-address": "REPLACE_WITH_dfs.https.address",
-  "dfs.namenode.accesstime.precision": "REPLACE_WITH_dfs.access.time.precision",
-  "dfs.namenode.http-address": "REPLACE_WITH_dfs.http.address",
-  "dfs.namenode.name.dir": "REPLACE_WITH_dfs.name.dir",
-  "dfs.namenode.safemode.threshold-pct": "REPLACE_WITH_dfs.safemode.threshold.pct",
-  "dfs.namenode.secondary.http-address": "REPLACE_WITH_dfs.secondary.http.address",
-  "dfs.permissions.enabled": "REPLACE_WITH_dfs.permissions",
-  "dfs.permissions.superusergroup": "REPLACE_WITH_dfs.permissions.supergroup",
+  "dfs.journalnode.http-address": "0.0.0.0:8480",
   "dfs.secondary.https.port": "DELETE_OLD",
   "dfs.web.ugi": "DELETE_OLD",
   "fs.permissions.umask-mode": "022",
@@ -248,20 +494,17 @@ HDFS_SITE = {
 }
 
 CORE_SITE = {
-  "dfs.namenode.checkpoint.dir": "REPLACE_WITH_fs.checkpoint.dir",
-  "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
-  "dfs.namenode.checkpoint.period": "REPLACE_WITH_fs.checkpoint.period",
   "fs.checkpoint.size": "0.5",
-  "fs.defaultFS": "REPLACE_WITH_fs.default.name",
   "hadoop.security.auth_to_local": "\n        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n        RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT\n    ",
   "hadoop.security.authentication": "simple",
   "hadoop.security.authorization": "false",
   "io.compression.codec.lzo.class": "DELETE_OLD",
   "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec",
-  "mapreduce.jobtracker.webinterface.trusted": "REPLACE_WITH_webinterface.private.actions",
 }
 
 YARN_SITE = {
+  "yarn.acl.enable" : "true",
+  "yarn.admin.acl" : "*",
   "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*",
   "yarn.log-aggregation-enable": "true",
   "yarn.log-aggregation.retain-seconds": "2592000",
@@ -541,15 +784,17 @@ def get_config_resp(options, type, error_if_na=True):
 
 
 def modify_configs(options, config_type):
+  properties_to_move = [
+    "dfs.namenode.checkpoint.edits.dir",
+    "dfs.namenode.checkpoint.dir",
+    "dfs.namenode.checkpoint.period"]
   hostmapping = read_mapping()
-  # Add capacity-scheduler, mapred-queue-acls, yarn-site
+
+  # Add capacity-scheduler and yarn-site with default values
   if (config_type is None) or (config_type == CAPACITY_SCHEDULER_TAG):
     update_config(options, CAPACITY_SCHEDULER, CAPACITY_SCHEDULER_TAG)
     pass
 
-  if (config_type is None) or (config_type == MAPRED_QUEUE_ACLS_TAG):
-    update_config(options, MAPRED_QUEUE_ACLS, MAPRED_QUEUE_ACLS_TAG)
-    pass
   jt_host = hostmapping["JOBTRACKER"][0]
 
   if (config_type is None) or (config_type == YARN_SITE_TAG):
@@ -564,6 +809,21 @@ def modify_configs(options, config_type):
     update_config(options, YARN_SITE, YARN_SITE_TAG)
     pass
 
+  # Update global config
+  if (config_type is None) or (config_type == GLOBAL_TAG):
+    update_config_using_existing(options, GLOBAL_TAG, GLOBAL, True)
+    pass
+
+  core_site_latest = rename_all_properties(get_config(options, CORE_SITE_TAG), PROPERTY_MAPPING)
+  hdfs_site_latest = rename_all_properties(get_config(options, HDFS_SITE_TAG), PROPERTY_MAPPING)
+  mapred_site_latest = rename_all_properties(get_config(options, MAPRED_SITE_TAG), PROPERTY_MAPPING)
+
+  for prop in properties_to_move:
+    if prop in core_site_latest:
+      hdfs_site_latest[prop] = core_site_latest[prop]
+      del core_site_latest[prop]
+    pass
+
   # Update mapred-site config
   if (config_type is None) or (config_type == MAPRED_SITE_TAG):
     for key in MAPRED_SITE.keys():
@@ -572,24 +832,36 @@ def modify_configs(options, config_type):
         pass
       pass
     pass
-    update_config_using_existing(options, MAPRED_SITE_TAG, MAPRED_SITE, True)
+    update_config_using_existing_properties(options, MAPRED_SITE_TAG, MAPRED_SITE, mapred_site_latest, True)
     pass
 
-  # Update global config, hdfs-site, core-site
-  if (config_type is None) or (config_type == GLOBAL_TAG):
-    update_config_using_existing(options, GLOBAL_TAG, GLOBAL, True)
-    pass
+  # Update hdfs-site, core-site
   if (config_type is None) or (config_type == HDFS_SITE_TAG):
-    update_config_using_existing(options, HDFS_SITE_TAG, HDFS_SITE, True)
+    update_config_using_existing_properties(options, HDFS_SITE_TAG, HDFS_SITE, hdfs_site_latest, True)
     pass
   if (config_type is None) or (config_type == CORE_SITE_TAG):
-    update_config_using_existing(options, CORE_SITE_TAG, CORE_SITE, True)
+    update_config_using_existing_properties(options, CORE_SITE_TAG, CORE_SITE, core_site_latest, True)
     pass
   pass
 
 
+def rename_all_properties(properties, name_mapping):
+  for key, val in name_mapping.items():
+    if (key in properties.keys()) and (val not in properties.keys()):
+      properties[val] = properties[key]
+      del properties[key]
+    pass
+  return properties
+
+
 def update_config_using_existing(options, type, properties_template, append_unprocessed=False):
   site_properties = get_config(options, type)
+  update_config_using_existing_properties(options, type, properties_template, site_properties, append_unprocessed)
+  pass
+
+
+def update_config_using_existing_properties(options, type, properties_template,
+                                            site_properties, append_unprocessed=False):
   keys_processed = []
   keys_to_delete = []
   for key in properties_template.keys():

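A note on the template markers seen throughout MAPRED_SITE, GLOBAL, HDFS_SITE
and CORE_SITE above: "DELETE_OLD" marks a property to drop, and a value of the
form "REPLACE_WITH_<old.name>" carries the cluster's existing value of
<old.name> over to the new key. The body of
update_config_using_existing_properties() is only partially visible in this
diff, so the following is a hedged sketch of that convention, not the
authoritative implementation:

    REPLACE_PREFIX = "REPLACE_WITH_"

    def apply_template(template, site_properties, append_unprocessed=True):
        # Assumed reading of the marker convention used by the templates above.
        merged = {}
        consumed = set()
        for key, value in template.items():
            if value == "DELETE_OLD":
                consumed.add(key)                  # drop the obsolete property
            elif value.startswith(REPLACE_PREFIX):
                source = value[len(REPLACE_PREFIX):]
                if source in site_properties:      # carry the old value to the new key
                    merged[key] = site_properties[source]
                consumed.add(source)
            else:
                merged[key] = value                # plain default from the template
        if append_unprocessed:                     # keep untouched site properties
            for key, value in site_properties.items():
                if key not in consumed and key not in merged:
                    merged[key] = value
        return merged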
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/66236fe7/ambari-server/src/test/python/TestUpgradeScript_HDP2.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestUpgradeScript_HDP2.py b/ambari-server/src/test/python/TestUpgradeScript_HDP2.py
index 3cc7fc1..8252a80 100644
--- a/ambari-server/src/test/python/TestUpgradeScript_HDP2.py
+++ b/ambari-server/src/test/python/TestUpgradeScript_HDP2.py
@@ -268,7 +268,9 @@ class TestUpgradeHDP2Script(TestCase):
     pass
 
 
-  @patch.object(UpgradeHelper_HDP2, "update_config_using_existing")
+  @patch.object(UpgradeHelper_HDP2, "get_config")
+  @patch.object(UpgradeHelper_HDP2, "rename_all_properties")
+  @patch.object(UpgradeHelper_HDP2, "update_config_using_existing_properties")
   @patch.object(UpgradeHelper_HDP2, "read_mapping")
   @patch.object(logging, 'FileHandler')
   @patch.object(UpgradeHelper_HDP2, "backup_file")
@@ -276,7 +278,7 @@ class TestUpgradeHDP2Script(TestCase):
   @patch('optparse.OptionParser')
   def test_update_single_configs(self, option_parser_mock, curl_mock,
                                  backup_file_mock, file_handler_mock, read_mapping_mock,
-                                 update_config_mock):
+                                 update_config_mock, rename_all_prop_mock, get_config_mock):
     file_handler_mock.return_value = logging.FileHandler('') # disable creating real file
     opm = option_parser_mock.return_value
     options = MagicMock()
@@ -285,8 +287,19 @@ class TestUpgradeHDP2Script(TestCase):
     curl_mock.side_effect = ['', '', '', '', '', '', '']
     read_mapping_mock.return_value = {"JOBTRACKER": ["c6401"]}
     update_config_mock.side_effect = [None]
+    get_config_mock.return_value = {}
+    prop_to_move = {"dfs.namenode.checkpoint.edits.dir": "a1",
+                    "dfs.namenode.checkpoint.dir": "a2",
+                    "dfs.namenode.checkpoint.period": "a3"}
+    rename_all_prop_mock.side_effect = [
+      prop_to_move,
+      {}, {}]
     UpgradeHelper_HDP2.main()
     self.assertTrue(update_config_mock.call_count == 1)
+    args, kargs = update_config_mock.call_args_list[0]
+    self.assertEqual("hdfs-site", args[1])
+    for key in prop_to_move.keys():
+      self.assertEqual(prop_to_move[key], args[3][key])
     pass
 
   @patch.object(UpgradeHelper_HDP2, "get_config")
@@ -319,7 +332,7 @@ class TestUpgradeHDP2Script(TestCase):
       "mapred.jobtracker.maxtasks.per.job": "an_old_value",
       "mapred.jobtracker.taskScheduler": "an_old_value",
       "mapred.task.tracker.task-controller": "an_old_value",
-      "mapred.userlog.retain.hours": "an_old_value",
+      "mapred.userlog.retain.hours": "will_not_be_stored",
       "global1": "global11"
     }
     UpgradeHelper_HDP2.GLOBAL = {"global2": "REPLACE_WITH_global1"}
@@ -327,13 +340,12 @@ class TestUpgradeHDP2Script(TestCase):
     UpgradeHelper_HDP2.CORE_SITE = {"global2": "REPLACE_WITH_global1"}
     UpgradeHelper_HDP2.main()
     self.validate_update_config_call(curl_mock.call_args_list[0], "capacity-scheduler")
-    self.validate_update_config_call(curl_mock.call_args_list[1], "mapred-queue-acls")
-    self.validate_update_config_call(curl_mock.call_args_list[2], "yarn-site")
+    self.validate_update_config_call(curl_mock.call_args_list[1], "yarn-site")
     self.validate_update_config_call(curl_mock.call_args_list[3], "mapred-site")
-    self.validate_update_config_call(curl_mock.call_args_list[4], "global")
-    self.validate_config_replacememt(curl_mock.call_args_list[2], "yarn-site")
+    self.validate_update_config_call(curl_mock.call_args_list[2], "global")
+    self.validate_config_replacememt(curl_mock.call_args_list[1], "yarn-site")
     self.validate_config_replacememt(curl_mock.call_args_list[3], "mapred-site")
-    self.validate_config_replacememt(curl_mock.call_args_list[4], "global")
+    self.validate_config_replacememt(curl_mock.call_args_list[2], "global")
     pass
 
   @patch.object(UpgradeHelper_HDP2, "read_mapping")
@@ -410,6 +422,20 @@ class TestUpgradeHDP2Script(TestCase):
       pass
     pass
 
+  def test_rename_all_properties(self):
+    site_properties = {
+      "mapred.task.is.map": "mapreduce.task.ismap",
+      "mapred.task.partition": "mapreduce.task.partition",
+      "mapred.task.profile": "mapreduce.task.profile",
+      "abc": "abc"
+    }
+    site_properties = \
+      UpgradeHelper_HDP2.rename_all_properties(site_properties, UpgradeHelper_HDP2.PROPERTY_MAPPING)
+    for key in site_properties.keys():
+      self.assertEqual(key, site_properties[key])
+    self.assertEqual(4, len(site_properties))
+    pass
+
   def test_tags_count(self):
     def count_tags(template):
       deleted = 0
@@ -430,15 +456,15 @@ class TestUpgradeHDP2Script(TestCase):
     self.assertEqual(18, deleted)
 
     deleted, replaced = count_tags(UpgradeHelper_HDP2.MAPRED_SITE)
-    self.assertEqual(17, replaced)
-    self.assertEqual(60, deleted)
+    self.assertEqual(2, replaced)
+    self.assertEqual(71, deleted)
 
     deleted, replaced = count_tags(UpgradeHelper_HDP2.CORE_SITE)
-    self.assertEqual(4, replaced)
+    self.assertEqual(0, replaced)
     self.assertEqual(1, deleted)
 
     deleted, replaced = count_tags(UpgradeHelper_HDP2.HDFS_SITE)
-    self.assertEqual(12, replaced)
+    self.assertEqual(0, replaced)
     self.assertEqual(7, deleted)
     pass
 
@@ -457,7 +483,7 @@ class TestUpgradeHDP2Script(TestCase):
       self.assertTrue("c6401" in args[6])
       self.assertFalse("an_old_value" in args[6])
     elif type == "mapred-site":
-      self.assertTrue("an_old_value" in args[6])
+      self.assertFalse("will_not_be_stored" in args[6])
     elif type == "global":
       self.assertTrue("global11" in args[6])
       self.assertTrue("an_old_value" in args[6])