You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by nc...@apache.org on 2013/08/09 13:52:32 UTC
git commit: AMBARI-2835. Replace deprecated property names. (ncole)
Updated Branches:
refs/heads/trunk 65662172f -> 28a4ae836
AMBARI-2835. Replace deprecated property names. (ncole)
Project: http://git-wip-us.apache.org/repos/asf/incubator-ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ambari/commit/28a4ae83
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ambari/tree/28a4ae83
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ambari/diff/28a4ae83
Branch: refs/heads/trunk
Commit: 28a4ae836bcb0098867e29835476d18480c4d906
Parents: 6566217
Author: Nate Cole <nc...@hortonworks.com>
Authored: Thu Aug 8 13:25:30 2013 -0400
Committer: Nate Cole <nc...@hortonworks.com>
Committed: Fri Aug 9 07:51:16 2013 -0400
----------------------------------------------------------------------
.../modules/hdp-hadoop/manifests/params.pp | 30 ++++++------
.../main/puppet/modules/hdp/manifests/init.pp | 21 ++++++--
.../main/puppet/modules/hdp/manifests/params.pp | 6 ++-
.../services/HBASE/configuration/hbase-site.xml | 11 +++++
.../services/HDFS/configuration/core-site.xml | 18 +++----
.../services/HDFS/configuration/global.xml | 8 ++--
.../HDFS/configuration/hadoop-policy.xml | 4 +-
.../services/HDFS/configuration/hdfs-site.xml | 32 ++++++-------
.../MAPREDUCE2/configuration/mapred-site.xml | 50 ++++++++++----------
.../services/OOZIE/configuration/oozie-site.xml | 6 +--
.../services/HBASE/configuration/hbase-site.xml | 11 +++++
.../services/HDFS/configuration/core-site.xml | 10 ++--
.../services/HDFS/configuration/global.xml | 8 ++--
.../HDFS/configuration/hadoop-policy.xml | 4 +-
.../services/HDFS/configuration/hdfs-site.xml | 32 ++++++-------
.../MAPREDUCE2/configuration/mapred-site.xml | 50 ++++++++++----------
.../services/OOZIE/configuration/oozie-site.xml | 6 +--
ambari-web/app/data/HDP2/config_mapping.js | 47 +++++++++---------
ambari-web/app/data/HDP2/config_properties.js | 20 ++++----
ambari-web/app/models/service_config.js | 8 ++++
20 files changed, 214 insertions(+), 168 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/28a4ae83/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
index 92c38f8..a4abb1d 100644
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
@@ -110,7 +110,11 @@ class hdp-hadoop::params(
}
### core-site
- $fs_checkpoint_dir = hdp_default("core-site/fs.checkpoint.dir","/tmp/hadoop-hdfs/dfs/namesecondary")
+ if (hdp_get_major_stack_version($stack_version) >= 2) {
+ $fs_checkpoint_dir = hdp_default("core-site/dfs.namenode.checkpoint.dir","/tmp/hadoop-hdfs/dfs/namesecondary")
+ } else {
+ $fs_checkpoint_dir = hdp_default("core-site/fs.checkpoint.dir","/tmp/hadoop-hdfs/dfs/namesecondary")
+ }
$proxyuser_group = hdp_default("core-site/proxyuser.group","users")
@@ -131,7 +135,11 @@ class hdp-hadoop::params(
$dfs_include = hdp_default("hdfs-site/dfs.include","dfs.include")
- $dfs_name_dir = hdp_default("hdfs-site/dfs.name.dir","/tmp/hadoop-hdfs/dfs/name")
+ if (hdp_get_major_stack_version($stack_version) >= 2) {
+ $dfs_name_dir = hdp_default("hdfs-site/dfs.namenode.name.dir","/tmp/hadoop-hdfs/dfs/name")
+ } else {
+ $dfs_name_dir = hdp_default("hdfs-site/dfs.name.dir","/tmp/hadoop-hdfs/dfs/name")
+ }
$dfs_replication = hdp_default("hdfs-site/dfs.replication",3)
@@ -148,10 +156,6 @@ class hdp-hadoop::params(
$mapred_system_dir = '/mapred/system'
- $io_sort_mb = hdp_default("mapred-site/io.sort.mb","200")
-
- $io_sort_spill_percent = hdp_default("mapred-site/io.sort.spill.percent","0.9")
-
$mapred_child_java_opts_sz = hdp_default("mapred-site/mapred.child.java.opts.sz","-Xmx768m")
$mapred_cluster_map_mem_mb = hdp_default("mapred-site/mapred.cluster.map.mem.mb","-1")
@@ -162,22 +166,18 @@ class hdp-hadoop::params(
$mapred_cluster_red_mem_mb = hdp_default("mapred-site/mapred.cluster.red.mem.mb","-1")
- $mapred_hosts_exclude = hdp_default("mapred-site/mapred.hosts.exclude","mapred.exclude")
-
- $mapred_hosts_include = hdp_default("mapred-site/mapred.hosts.include","mapred.include")
-
$mapred_job_map_mem_mb = hdp_default("mapred-site/mapred.job.map.mem.mb","-1")
$mapred_job_red_mem_mb = hdp_default("mapred-site/mapred.job.red.mem.mb","-1")
$mapred_jobstatus_dir = hdp_default("mapred-site/mapred.jobstatus.dir","file:////mapred/jobstatus")
- $mapred_local_dir = hdp_default("mapred-site/mapred.local.dir","/tmp/hadoop-mapred/mapred/local")
+ if (hdp_get_major_stack_version($stack_version) >= 2) {
+ $mapred_local_dir = hdp_default("mapred-site/mapreduce.cluster.local.dir","/tmp/hadoop-mapred/mapred/local")
+ } else {
+ $mapred_local_dir = hdp_default("mapred-site/mapred.local.dir","/tmp/hadoop-mapred/mapred/local")
+ }
- $mapred_map_tasks_max = hdp_default("mapred-site/mapred.map.tasks.max",4)
-
- $mapred_red_tasks_max = hdp_default("mapred-site/mapred.red.tasks.max",4)
-
$mapreduce_userlog_retainhours = hdp_default("mapred-site/mapreduce.userlog.retainhours",24)
$maxtasks_per_job = hdp_default("mapred-site/maxtasks.per.job","-1")
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/28a4ae83/ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp
index 7a53ed6..26b5fe7 100644
--- a/ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp
@@ -36,8 +36,15 @@ class hdp(
## Port settings
if has_key($configuration, 'hdfs-site') {
$hdfs-site = $configuration['hdfs-site']
- $namenode_port = hdp_get_port_from_url($hdfs-site["dfs.http.address"])
- $snamenode_port = hdp_get_port_from_url($hdfs-site["dfs.secondary.http.address"])
+
+ if (hdp_get_major_stack_version($stack_version) >= 2) {
+ $namenode_port = hdp_get_port_from_url($hdfs-site["dfs.namenode.http-address"])
+ $snamenode_port = hdp_get_port_from_url($hdfs-site["dfs.namenode.secondary.http-address"])
+ } else {
+ $namenode_port = hdp_get_port_from_url($hdfs-site["dfs.http.address"])
+ $snamenode_port = hdp_get_port_from_url($hdfs-site["dfs.secondary.http.address"])
+ }
+
$datanode_port = hdp_get_port_from_url($hdfs-site["dfs.datanode.http.address"])
$journalnode_port = hdp_get_port_from_url($hdfs-site["dfs.journalnode.http-address"])
} else {
@@ -49,8 +56,14 @@ class hdp(
if has_key($configuration, 'mapred-site') {
$mapred-site = $configuration['mapred-site']
- $jtnode_port = hdp_get_port_from_url($mapred-site["mapred.job.tracker.http.address"],"50030")
- $tasktracker_port = hdp_get_port_from_url($mapred-site["mapred.task.tracker.http.address"],"50060")
+
+ if (hdp_get_major_stack_version($stack_version) >= 2) {
+ $jtnode_port = hdp_get_port_from_url($mapred-site["mapreduce.jobtracker.http.address"],"50030")
+ $tasktracker_port = hdp_get_port_from_url($mapred-site["mapreduce.tasktracker.http.address"],"50060")
+ } else {
+ $jtnode_port = hdp_get_port_from_url($mapred-site["mapred.job.tracker.http.address"],"50030")
+ $tasktracker_port = hdp_get_port_from_url($mapred-site["mapred.task.tracker.http.address"],"50060")
+ }
$jobhistory_port = hdp_get_port_from_url($mapred-site["mapreduce.history.server.http.address"],"51111")
$hs_port = hdp_get_port_from_url($mapred-site["mapreduce.jobhistory.webapp.address"],"19888")
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/28a4ae83/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp
index ef9d4d4..ce7f75f 100644
--- a/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp
+++ b/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp
@@ -298,7 +298,11 @@ class hdp::params()
$exec_path = ["/bin","/usr/bin", "/usr/sbin"]
#### params used on multiple modules
- $dfs_data_dir = hdp_default("hdfs-site/dfs.data.dir","/tmp/hadoop-hdfs/dfs/data")
+ if (hdp_get_major_stack_version($stack_version) >= 2) {
+ $dfs_data_dir = hdp_default("hdfs-site/dfs.datanode.data.dir","/tmp/hadoop-hdfs/dfs/data")
+ } else {
+ $dfs_data_dir = hdp_default("hdfs-site/dfs.data.dir","/tmp/hadoop-hdfs/dfs/data")
+ }
### artifact dir
$artifact_dir = hdp_default("artifact_dir","/tmp/HDP-artifacts/")
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/28a4ae83/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HBASE/configuration/hbase-site.xml
index be9c023..ecd9e50 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HBASE/configuration/hbase-site.xml
@@ -363,5 +363,16 @@
</description>
</property>
+ <property>
+ <name>hbase.defaults.for.version.skip</name>
+ <value>true</value>
+ <description>Disables version verification.</description>
+ </property>
+
+ <property>
+ <name>dfs.domain.socket.path</name>
+ <value>/var/lib/hadoop-hdfs/dn_socket</value>
+ <description>Path to domain socket.</description>
+ </property>
</configuration>
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/28a4ae83/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/core-site.xml
index ed6523c..ceab4ed 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/core-site.xml
@@ -54,7 +54,7 @@
<!-- file system properties -->
<property>
- <name>fs.default.name</name>
+ <name>fs.defaultFS</name>
<!-- cluster variant -->
<value></value>
<description>The name of the default file system. Either the
@@ -71,7 +71,7 @@
</property>
<property>
- <name>fs.checkpoint.dir</name>
+ <name>dfs.namenode.checkpoint.dir</name>
<value></value>
<description>Determines where on the local filesystem the DFS secondary
name node should store the temporary images to merge.
@@ -81,18 +81,18 @@
</property>
<property>
- <name>fs.checkpoint.edits.dir</name>
- <value>${fs.checkpoint.dir}</value>
+ <name>dfs.namenode.checkpoint.edits.dir</name>
+ <value>${dfs.namenode.checkpoint.dir}</value>
<description>Determines where on the local filesystem the DFS secondary
name node should store the temporary edits to merge.
If this is a comma-delimited list of directories then the edits are
replicated in all of the directories for redundancy.
- Default value is same as fs.checkpoint.dir
+ Default value is same as dfs.namenode.checkpoint.dir
</description>
</property>
<property>
- <name>fs.checkpoint.period</name>
+ <name>dfs.namenode.checkpoint.period</name>
<value>21600</value>
<description>The number of seconds between two periodic checkpoints.
</description>
@@ -102,7 +102,7 @@
<name>fs.checkpoint.size</name>
<value>536870912</value>
<description>The size of the current edit log (in bytes) that triggers
- a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
+ a periodic checkpoint even if the dfs.namenode.checkpoint.period hasn't expired.
</description>
</property>
@@ -131,7 +131,7 @@
<!-- Web Interface Configuration -->
<property>
- <name>webinterface.private.actions</name>
+ <name>mapreduce.jobtracker.webinterface.trusted</name>
<value>false</value>
<description> If set to true, the web interfaces of JT and NN may contain
actions, such as kill job, delete file, etc., that should
@@ -166,7 +166,7 @@
RULE:[2:$1@$0](rs@.*)s/.*/hbase/
DEFAULT
</value>
-<description>The mapping from kerberos principal names to local OS user names.
+<description>The mapping from kerberos principal names to local OS user names.
So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
"omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
The translations rules have 3 sections:
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/28a4ae83/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
index dbbe81b..5f4dab2 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
@@ -27,7 +27,7 @@
<description>NameNode Host.</description>
</property>
<property>
- <name>dfs_name_dir</name>
+ <name>dfs_namenode_name_dir</name>
<value>/hadoop/hdfs/namenode</value>
<description>NameNode Directories.</description>
</property>
@@ -37,7 +37,7 @@
<description>Secondary NameNode.</description>
</property>
<property>
- <name>fs_checkpoint_dir</name>
+ <name>dfs_namenode_checkpoint_dir</name>
<value>/hadoop/hdfs/namesecondary</value>
<description>Secondary NameNode checkpoint dir.</description>
</property>
@@ -47,7 +47,7 @@
<description>List of Datanode Hosts.</description>
</property>
<property>
- <name>dfs_data_dir</name>
+ <name>dfs_datanode_data_dir</name>
<value>/hadoop/hdfs/data</value>
<description>Data directories for Data Nodes.</description>
</property>
@@ -102,7 +102,7 @@
<description>DataNode volumes failure toleration</description>
</property>
<property>
- <name>fs_checkpoint_period</name>
+ <name>dfs_namenode_checkpoint_period</name>
<value>21600</value>
<description>HDFS Maximum Checkpoint Delay</description>
</property>
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/28a4ae83/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
index 6ec304d..51b01bb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hadoop-policy.xml
@@ -82,7 +82,7 @@
</property>
<property>
- <name>security.job.submission.protocol.acl</name>
+ <name>security.job.client.protocol.acl</name>
<value>*</value>
<description>ACL for JobSubmissionProtocol, used by job clients to
communicate with the jobtracker for job submission, querying job status etc.
@@ -92,7 +92,7 @@
</property>
<property>
- <name>security.task.umbilical.protocol.acl</name>
+ <name>security.job.task.protocol.acl</name>
<value>*</value>
<description>ACL for TaskUmbilicalProtocol, used by the map and reduce
tasks to communicate with the parent tasktracker.
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/28a4ae83/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
index fcd6015..b7d6f0e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/HDFS/configuration/hdfs-site.xml
@@ -25,7 +25,7 @@
<!-- file system properties -->
<property>
- <name>dfs.name.dir</name>
+ <name>dfs.namenode.name.dir</name>
<!-- cluster variant -->
<value></value>
<description>Determines where on the local filesystem the DFS name node
@@ -74,7 +74,7 @@
</property>
<property>
- <name>dfs.data.dir</name>
+ <name>dfs.datanode.data.dir</name>
<value></value>
<description>Determines where on the local filesystem an DFS data node
should store its blocks. If this is a comma-delimited
@@ -141,18 +141,18 @@
</property>
<property>
- <name>dfs.safemode.threshold.pct</name>
+ <name>dfs.namenode.safemode.threshold-pct</name>
<value>1.0f</value>
<description>
Specifies the percentage of blocks that should satisfy
- the minimal replication requirement defined by dfs.replication.min.
+ the minimal replication requirement defined by dfs.namenode.replication.min.
Values less than or equal to 0 mean not to start in safe mode.
Values greater than 1 will make safe mode permanent.
</description>
</property>
<property>
- <name>dfs.balance.bandwidthPerSec</name>
+ <name>dfs.datanode.balance.bandwidthPerSec</name>
<value>6250000</value>
<description>
Specifies the maximum amount of bandwidth that each datanode
@@ -172,13 +172,13 @@
</property>
<property>
- <name>dfs.block.size</name>
+ <name>dfs.blocksize</name>
<value>134217728</value>
<description>The default block size for new files.</description>
</property>
<property>
- <name>dfs.http.address</name>
+ <name>dfs.namenode.http-address</name>
<value></value>
<description>The name of the default file system. Either the
literal string "local" or a host:port for NDFS.</description>
@@ -222,7 +222,7 @@ If the port is 0 then the server will start on a free port.
</property>
<property>
-<name>dfs.datanode.max.xcievers</name>
+<name>dfs.datanode.max.transfer.threads</name>
<value>1024</value>
<description>PRIVATE CONFIG VARIABLE</description>
</property>
@@ -230,7 +230,7 @@ If the port is 0 then the server will start on a free port.
<!-- Permissions configuration -->
<property>
-<name>dfs.umaskmode</name>
+<name>fs.permissions.umask-mode</name>
<value>022</value>
<description>
The octal umask used when creating files and directories.
@@ -247,7 +247,7 @@ Syntax: USERNAME,GROUP1,GROUP2, ...
</property>
<property>
-<name>dfs.permissions</name>
+<name>dfs.permissions.enabled</name>
<value>true</value>
<description>
If "true", enable permission checking in HDFS.
@@ -259,7 +259,7 @@ owner or group of files or directories.
</property>
<property>
-<name>dfs.permissions.supergroup</name>
+<name>dfs.permissions.enabled.superusergroup</name>
<value>hdfs</value>
<description>The name of the group of super-users.</description>
</property>
@@ -319,7 +319,7 @@ Kerberos principal name for the NameNode
<property>
<!-- cluster variant -->
- <name>dfs.secondary.http.address</name>
+ <name>dfs.namenode.secondary.http-address</name>
<value></value>
<description>Address of secondary namenode web server</description>
</property>
@@ -389,7 +389,7 @@ Kerberos principal name for the NameNode
</property>
<property>
- <name>dfs.https.address</name>
+ <name>dfs.namenode.https-address</name>
<value></value>
<description>The https address where namenode binds</description>
@@ -398,14 +398,14 @@ Kerberos principal name for the NameNode
<property>
<name>dfs.datanode.data.dir.perm</name>
<value>750</value>
-<description>The permissions that should be there on dfs.data.dir
+<description>The permissions that should be there on dfs.datanode.data.dir
directories. The datanode will not come up if the permissions are
-different on existing dfs.data.dir directories. If the directories
+different on existing dfs.datanode.data.dir directories. If the directories
don't exist, they will be created with this permission.</description>
</property>
<property>
- <name>dfs.access.time.precision</name>
+ <name>dfs.namenode.accesstime.precision</name>
<value>0</value>
<description>The access time for HDFS file is precise upto this value.
The default value is 1 hour. Setting a value of 0 disables
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/28a4ae83/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
index 44052cc..53b7fb7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
@@ -25,7 +25,7 @@
<!-- i/o properties -->
<property>
- <name>io.sort.mb</name>
+ <name>mapreduce.task.io.sort.mb</name>
<value>100</value>
<description>No description</description>
</property>
@@ -37,13 +37,13 @@
</property>
<property>
- <name>io.sort.spill.percent</name>
+ <name>mapreduce.map.sort.spill.percent</name>
<value>0.1</value>
<description>No description</description>
</property>
<property>
- <name>io.sort.factor</name>
+ <name>mapreduce.task.io.sort.factor</name>
<value>100</value>
<description>No description</description>
</property>
@@ -51,7 +51,7 @@
<!-- map/reduce properties -->
<property>
- <name>mapred.system.dir</name>
+ <name>mapreduce.jobtracker.system.dir</name>
<value></value>
<description>No description</description>
<final>true</final>
@@ -59,45 +59,45 @@
<property>
<!-- cluster specific -->
- <name>mapred.local.dir</name>
+ <name>mapreduce.cluster.local.dir</name>
<value></value>
<description>No description</description>
<final>true</final>
</property>
<property>
- <name>mapred.reduce.parallel.copies</name>
+ <name>mapreduce.reduce.shuffle.parallelcopies</name>
<value>30</value>
<description>No description</description>
</property>
<property>
- <name>mapred.tasktracker.map.tasks.maximum</name>
+ <name>mapreduce.tasktracker.map.tasks.maximum</name>
<value></value>
<description>No description</description>
</property>
<property>
- <name>mapred.map.tasks.speculative.execution</name>
+ <name>mapreduce.map.speculative</name>
<value>false</value>
<description>If true, then multiple instances of some map tasks
may be executed in parallel.</description>
</property>
<property>
- <name>mapred.reduce.tasks.speculative.execution</name>
+ <name>mapreduce.reduce.speculative</name>
<value>false</value>
<description>If true, then multiple instances of some reduce tasks
may be executed in parallel.</description>
</property>
<property>
- <name>mapred.reduce.slowstart.completed.maps</name>
+ <name>mapreduce.job.reduce.slowstart.completedmaps</name>
<value>0.05</value>
</property>
<property>
- <name>mapred.inmem.merge.threshold</name>
+ <name>mapreduce.reduce.merge.inmem.threshold</name>
<value>1000</value>
<description>The threshold, in terms of the number of files
for the in-memory merge process. When we accumulate threshold number of files
@@ -108,17 +108,17 @@
</property>
<property>
- <name>mapred.job.shuffle.merge.percent</name>
+ <name>mapreduce.reduce.shuffle.merge.percent</name>
<value>0.66</value>
<description>The usage threshold at which an in-memory merge will be
initiated, expressed as a percentage of the total memory allocated to
storing in-memory map outputs, as defined by
- mapred.job.shuffle.input.buffer.percent.
+ mapreduce.reduce.shuffle.input.buffer.percent.
</description>
</property>
<property>
- <name>mapred.job.shuffle.input.buffer.percent</name>
+ <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
<value>0.7</value>
<description>The percentage of memory to be allocated from the maximum heap
size to storing map outputs during the shuffle.
@@ -126,7 +126,7 @@
</property>
<property>
- <name>mapred.map.output.compression.codec</name>
+ <name>mapreduce.map.output.compress.codec</name>
<value></value>
<description>If the map outputs are compressed, how should they be
compressed
@@ -134,7 +134,7 @@
</property>
<property>
- <name>mapred.output.compression.type</name>
+ <name>mapreduce.output.fileoutputformat.compress.type</name>
<value>BLOCK</value>
<description>If the job outputs are to compressed as SequenceFiles, how should
they be compressed? Should be one of NONE, RECORD or BLOCK.
@@ -142,7 +142,7 @@
</property>
<property>
- <name>mapred.job.reduce.input.buffer.percent</name>
+ <name>mapreduce.reduce.input.buffer.percent</name>
<value>0.0</value>
<description>The percentage of memory- relative to the maximum heap size- to
retain map outputs during the reduce. When the shuffle is concluded, any
@@ -163,13 +163,13 @@
<!-- copied from kryptonite configuration -->
<property>
- <name>mapred.compress.map.output</name>
+ <name>mapreduce.map.output.compress</name>
<value></value>
</property>
<property>
- <name>mapred.task.timeout</name>
+ <name>mapreduce.task.timeout</name>
<value>600000</value>
<description>The number of milliseconds before a task will be
terminated if it neither reads an input, writes an output, nor
@@ -195,22 +195,22 @@
</property>
<property>
- <name>mapred.cluster.reduce.memory.mb</name>
+ <name>mapreduce.cluster.reducememory.mb</name>
<value>2048</value>
</property>
<property>
- <name>mapred.job.map.memory.mb</name>
+ <name>mapreduce.map.memory.mb</name>
<value>1536</value>
</property>
<property>
- <name>mapred.job.reduce.memory.mb</name>
+ <name>mapreduce.reduce.memory.mb</name>
<value>1024</value>
</property>
<property>
- <name>mapred.max.tracker.blacklists</name>
+ <name>mapreduce.jobtracker.tasktracker.maxblacklists</name>
<value>16</value>
<description>
if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted
@@ -218,12 +218,12 @@
</property>
<property>
- <name>mapred.healthChecker.script.path</name>
+ <name>mapreduce.tasktracker.healthchecker.script.path</name>
<value></value>
</property>
<property>
- <name>mapred.healthChecker.script.timeout</name>
+ <name>mapreduce.tasktracker.healthchecker.script.timeout</name>
<value>60000</value>
</property>
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/28a4ae83/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
index 1665ba8..5fb49ed 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.5/services/OOZIE/configuration/oozie-site.xml
@@ -31,7 +31,7 @@
<property>
<name>oozie.system.id</name>
- <value>oozie-${user.name}</value>
+ <value>oozie-${mapreduce.job.user.name}</value>
<description>
The Oozie system ID.
</description>
@@ -133,7 +133,7 @@
<property>
<name>oozie.service.WorkflowAppService.system.libpath</name>
- <value>/user/${user.name}/share/lib</value>
+ <value>/user/${mapreduce.job.user.name}/share/lib</value>
<description>
System library path to use for workflow applications.
This path is added to workflow application if their job properties sets
@@ -242,4 +242,4 @@
Max number of connections.
</description>
</property>
-</configuration>
\ No newline at end of file
+</configuration>
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/28a4ae83/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/configuration/hbase-site.xml
index be9c023..ecd9e50 100644
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/configuration/hbase-site.xml
@@ -363,5 +363,16 @@
</description>
</property>
+ <property>
+ <name>hbase.defaults.for.version.skip</name>
+ <value>true</value>
+ <description>Disables version verification.</description>
+ </property>
+
+ <property>
+ <name>dfs.domain.socket.path</name>
+ <value>/var/lib/hadoop-hdfs/dn_socket</value>
+ <description>Path to domain socket.</description>
+ </property>
</configuration>
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/28a4ae83/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/core-site.xml
index e646d5b..e6742a5 100644
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/core-site.xml
@@ -54,7 +54,7 @@
<!-- file system properties -->
<property>
- <name>fs.default.name</name>
+ <name>fs.defaultFS</name>
<!-- cluster variant -->
<value></value>
<description>The name of the default file system. Either the
@@ -71,7 +71,7 @@
</property>
<property>
- <name>fs.checkpoint.dir</name>
+ <name>dfs.namenode.checkpoint.dir</name>
<value></value>
<description>Determines where on the local filesystem the DFS secondary
name node should store the temporary images to merge.
@@ -81,13 +81,13 @@
</property>
<property>
- <name>fs.checkpoint.edits.dir</name>
- <value>${fs.checkpoint.dir}</value>
+ <name>dfs.namenode.checkpoint.edits.dir</name>
+ <value>${dfs.namenode.checkpoint.dir}</value>
<description>Determines where on the local filesystem the DFS secondary
name node should store the temporary edits to merge.
If this is a comma-delimited list of directories then the edits are
replicated in all of the directories for redundancy.
- Default value is same as fs.checkpoint.dir
+ Default value is same as dfs.namenode.checkpoint.dir
</description>
</property>
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/28a4ae83/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/global.xml
index dbbe81b..5f4dab2 100644
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/global.xml
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/global.xml
@@ -27,7 +27,7 @@
<description>NameNode Host.</description>
</property>
<property>
- <name>dfs_name_dir</name>
+ <name>dfs_namenode_name_dir</name>
<value>/hadoop/hdfs/namenode</value>
<description>NameNode Directories.</description>
</property>
@@ -37,7 +37,7 @@
<description>Secondary NameNode.</description>
</property>
<property>
- <name>fs_checkpoint_dir</name>
+ <name>dfs_namenode_checkpoint_dir</name>
<value>/hadoop/hdfs/namesecondary</value>
<description>Secondary NameNode checkpoint dir.</description>
</property>
@@ -47,7 +47,7 @@
<description>List of Datanode Hosts.</description>
</property>
<property>
- <name>dfs_data_dir</name>
+ <name>dfs_datanode_data_dir</name>
<value>/hadoop/hdfs/data</value>
<description>Data directories for Data Nodes.</description>
</property>
@@ -102,7 +102,7 @@
<description>DataNode volumes failure toleration</description>
</property>
<property>
- <name>fs_checkpoint_period</name>
+ <name>dfs_namenode_checkpoint_period</name>
<value>21600</value>
<description>HDFS Maximum Checkpoint Delay</description>
</property>
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/28a4ae83/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/hadoop-policy.xml
index 6ec304d..51b01bb 100644
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/hadoop-policy.xml
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/hadoop-policy.xml
@@ -82,7 +82,7 @@
</property>
<property>
- <name>security.job.submission.protocol.acl</name>
+ <name>security.job.client.protocol.acl</name>
<value>*</value>
<description>ACL for JobSubmissionProtocol, used by job clients to
communicate with the jobtracker for job submission, querying job status etc.
@@ -92,7 +92,7 @@
</property>
<property>
- <name>security.task.umbilical.protocol.acl</name>
+ <name>security.job.task.protocol.acl</name>
<value>*</value>
<description>ACL for TaskUmbilicalProtocol, used by the map and reduce
tasks to communicate with the parent tasktracker.
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/28a4ae83/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/hdfs-site.xml
index 10d58c5..db08e2a 100644
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/hdfs-site.xml
@@ -25,7 +25,7 @@
<!-- file system properties -->
<property>
- <name>dfs.name.dir</name>
+ <name>dfs.namenode.name.dir</name>
<!-- cluster variant -->
<value></value>
<description>Determines where on the local filesystem the DFS name node
@@ -74,7 +74,7 @@
</property>
<property>
- <name>dfs.data.dir</name>
+ <name>dfs.datanode.data.dir</name>
<value></value>
<description>Determines where on the local filesystem an DFS data node
should store its blocks. If this is a comma-delimited
@@ -141,18 +141,18 @@
</property>
<property>
- <name>dfs.safemode.threshold.pct</name>
+ <name>dfs.namenode.safemode.threshold-pct</name>
<value>1.0f</value>
<description>
Specifies the percentage of blocks that should satisfy
- the minimal replication requirement defined by dfs.replication.min.
+ the minimal replication requirement defined by dfs.namenode.replication.min.
Values less than or equal to 0 mean not to start in safe mode.
Values greater than 1 will make safe mode permanent.
</description>
</property>
<property>
- <name>dfs.balance.bandwidthPerSec</name>
+ <name>dfs.datanode.balance.bandwidthPerSec</name>
<value>6250000</value>
<description>
Specifies the maximum amount of bandwidth that each datanode
@@ -172,13 +172,13 @@
</property>
<property>
- <name>dfs.block.size</name>
+ <name>dfs.blocksize</name>
<value>134217728</value>
<description>The default block size for new files.</description>
</property>
<property>
- <name>dfs.http.address</name>
+ <name>dfs.namenode.http-address</name>
<value></value>
<description>The address and the base port where the DFS NameNode
web UI will listen on.</description>
@@ -222,7 +222,7 @@ If the port is 0 then the server will start on a free port.
</property>
<property>
-<name>dfs.datanode.max.xcievers</name>
+<name>dfs.datanode.max.transfer.threads</name>
<value>1024</value>
<description>PRIVATE CONFIG VARIABLE</description>
</property>
@@ -230,7 +230,7 @@ If the port is 0 then the server will start on a free port.
<!-- Permissions configuration -->
<property>
-<name>dfs.umaskmode</name>
+<name>fs.permissions.umask-mode</name>
<value>022</value>
<description>
The octal umask used when creating files and directories.
@@ -247,7 +247,7 @@ Syntax: USERNAME,GROUP1,GROUP2, ...
</property>
<property>
-<name>dfs.permissions</name>
+<name>dfs.permissions.enabled</name>
<value>true</value>
<description>
If "true", enable permission checking in HDFS.
@@ -259,7 +259,7 @@ owner or group of files or directories.
</property>
<property>
-<name>dfs.permissions.supergroup</name>
+<name>dfs.permissions.superusergroup</name>
<value>hdfs</value>
<description>The name of the group of super-users.</description>
</property>
@@ -319,7 +319,7 @@ Kerberos principal name for the NameNode
<property>
<!-- cluster variant -->
- <name>dfs.secondary.http.address</name>
+ <name>dfs.namenode.secondary.http-address</name>
<value></value>
<description>Address of secondary namenode web server</description>
</property>
@@ -389,7 +389,7 @@ Kerberos principal name for the NameNode
</property>
<property>
- <name>dfs.https.address</name>
+ <name>dfs.namenode.https-address</name>
<value></value>
<description>The https address where namenode binds</description>
@@ -398,14 +398,14 @@ Kerberos principal name for the NameNode
<property>
<name>dfs.datanode.data.dir.perm</name>
<value>750</value>
-<description>The permissions that should be there on dfs.data.dir
+<description>The permissions that should be there on dfs.datanode.data.dir
directories. The datanode will not come up if the permissions are
-different on existing dfs.data.dir directories. If the directories
+different on existing dfs.datanode.data.dir directories. If the directories
don't exist, they will be created with this permission.</description>
</property>
<property>
- <name>dfs.access.time.precision</name>
+ <name>dfs.namenode.accesstime.precision</name>
<value>0</value>
<description>The access time for an HDFS file is precise up to this value.
The default value is 1 hour. Setting a value of 0 disables
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/28a4ae83/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
index 44052cc..53b7fb7 100644
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
@@ -25,7 +25,7 @@
<!-- i/o properties -->
<property>
- <name>io.sort.mb</name>
+ <name>mapreduce.task.io.sort.mb</name>
<value>100</value>
<description>No description</description>
</property>
@@ -37,13 +37,13 @@
</property>
<property>
- <name>io.sort.spill.percent</name>
+ <name>mapreduce.map.sort.spill.percent</name>
<value>0.1</value>
<description>No description</description>
</property>
<property>
- <name>io.sort.factor</name>
+ <name>mapreduce.task.io.sort.factor</name>
<value>100</value>
<description>No description</description>
</property>
@@ -51,7 +51,7 @@
<!-- map/reduce properties -->
<property>
- <name>mapred.system.dir</name>
+ <name>mapreduce.jobtracker.system.dir</name>
<value></value>
<description>No description</description>
<final>true</final>
@@ -59,45 +59,45 @@
<property>
<!-- cluster specific -->
- <name>mapred.local.dir</name>
+ <name>mapreduce.cluster.local.dir</name>
<value></value>
<description>No description</description>
<final>true</final>
</property>
<property>
- <name>mapred.reduce.parallel.copies</name>
+ <name>mapreduce.reduce.shuffle.parallelcopies</name>
<value>30</value>
<description>No description</description>
</property>
<property>
- <name>mapred.tasktracker.map.tasks.maximum</name>
+ <name>mapreduce.tasktracker.map.tasks.maximum</name>
<value></value>
<description>No description</description>
</property>
<property>
- <name>mapred.map.tasks.speculative.execution</name>
+ <name>mapreduce.map.speculative</name>
<value>false</value>
<description>If true, then multiple instances of some map tasks
may be executed in parallel.</description>
</property>
<property>
- <name>mapred.reduce.tasks.speculative.execution</name>
+ <name>mapreduce.reduce.speculative</name>
<value>false</value>
<description>If true, then multiple instances of some reduce tasks
may be executed in parallel.</description>
</property>
<property>
- <name>mapred.reduce.slowstart.completed.maps</name>
+ <name>mapreduce.job.reduce.slowstart.completedmaps</name>
<value>0.05</value>
</property>
<property>
- <name>mapred.inmem.merge.threshold</name>
+ <name>mapreduce.reduce.merge.inmem.threshold</name>
<value>1000</value>
<description>The threshold, in terms of the number of files
for the in-memory merge process. When we accumulate threshold number of files
@@ -108,17 +108,17 @@
</property>
<property>
- <name>mapred.job.shuffle.merge.percent</name>
+ <name>mapreduce.reduce.shuffle.merge.percent</name>
<value>0.66</value>
<description>The usage threshold at which an in-memory merge will be
initiated, expressed as a percentage of the total memory allocated to
storing in-memory map outputs, as defined by
- mapred.job.shuffle.input.buffer.percent.
+ mapreduce.reduce.shuffle.input.buffer.percent.
</description>
</property>
<property>
- <name>mapred.job.shuffle.input.buffer.percent</name>
+ <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
<value>0.7</value>
<description>The percentage of memory to be allocated from the maximum heap
size to storing map outputs during the shuffle.
@@ -126,7 +126,7 @@
</property>
<property>
- <name>mapred.map.output.compression.codec</name>
+ <name>mapreduce.map.output.compress.codec</name>
<value></value>
<description>If the map outputs are compressed, how should they be
compressed
@@ -134,7 +134,7 @@
</property>
<property>
- <name>mapred.output.compression.type</name>
+ <name>mapreduce.output.fileoutputformat.compress.type</name>
<value>BLOCK</value>
<description>If the job outputs are to be compressed as SequenceFiles, how should
they be compressed? Should be one of NONE, RECORD or BLOCK.
@@ -142,7 +142,7 @@
</property>
<property>
- <name>mapred.job.reduce.input.buffer.percent</name>
+ <name>mapreduce.reduce.input.buffer.percent</name>
<value>0.0</value>
<description>The percentage of memory- relative to the maximum heap size- to
retain map outputs during the reduce. When the shuffle is concluded, any
@@ -163,13 +163,13 @@
<!-- copied from kryptonite configuration -->
<property>
- <name>mapred.compress.map.output</name>
+ <name>mapreduce.map.output.compress</name>
<value></value>
</property>
<property>
- <name>mapred.task.timeout</name>
+ <name>mapreduce.task.timeout</name>
<value>600000</value>
<description>The number of milliseconds before a task will be
terminated if it neither reads an input, writes an output, nor
@@ -195,22 +195,22 @@
</property>
<property>
- <name>mapred.cluster.reduce.memory.mb</name>
+ <name>mapreduce.cluster.reducememory.mb</name>
<value>2048</value>
</property>
<property>
- <name>mapred.job.map.memory.mb</name>
+ <name>mapreduce.map.memory.mb</name>
<value>1536</value>
</property>
<property>
- <name>mapred.job.reduce.memory.mb</name>
+ <name>mapreduce.reduce.memory.mb</name>
<value>1024</value>
</property>
<property>
- <name>mapred.max.tracker.blacklists</name>
+ <name>mapreduce.jobtracker.tasktracker.maxblacklists</name>
<value>16</value>
<description>
if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted
@@ -218,12 +218,12 @@
</property>
<property>
- <name>mapred.healthChecker.script.path</name>
+ <name>mapreduce.tasktracker.healthchecker.script.path</name>
<value></value>
</property>
<property>
- <name>mapred.healthChecker.script.timeout</name>
+ <name>mapreduce.tasktracker.healthchecker.script.timeout</name>
<value>60000</value>
</property>
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/28a4ae83/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/OOZIE/configuration/oozie-site.xml
index 1665ba8..5fb49ed 100644
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/OOZIE/configuration/oozie-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/OOZIE/configuration/oozie-site.xml
@@ -31,7 +31,7 @@
<property>
<name>oozie.system.id</name>
- <value>oozie-${user.name}</value>
+ <value>oozie-${mapreduce.job.user.name}</value>
<description>
The Oozie system ID.
</description>
@@ -133,7 +133,7 @@
<property>
<name>oozie.service.WorkflowAppService.system.libpath</name>
- <value>/user/${user.name}/share/lib</value>
+ <value>/user/${mapreduce.job.user.name}/share/lib</value>
<description>
System library path to use for workflow applications.
This path is added to workflow application if their job properties sets
@@ -242,4 +242,4 @@
Max number of connections.
</description>
</property>
-</configuration>
\ No newline at end of file
+</configuration>
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/28a4ae83/ambari-web/app/data/HDP2/config_mapping.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/HDP2/config_mapping.js b/ambari-web/app/data/HDP2/config_mapping.js
index 0e7371b..d3e7863 100644
--- a/ambari-web/app/data/HDP2/config_mapping.js
+++ b/ambari-web/app/data/HDP2/config_mapping.js
@@ -26,15 +26,15 @@ var configs = [
"filename": "core-site.xml"
},
{
- "name": "fs.checkpoint.dir",
- "templateName": ["fs_checkpoint_dir"],
+ "name": "dfs.namenode.checkpoint.dir",
+ "templateName": ["dfs_namenode_checkpoint_dir"],
"foreignKey": null,
"value": "<templateName[0]>",
"filename": "core-site.xml"
},
{
- "name": "fs.checkpoint.period",
- "templateName": ["fs_checkpoint_period"],
+ "name": "dfs.namenode.checkpoint.period",
+ "templateName": ["dfs_namenode_checkpoint_period"],
"foreignKey": null,
"value": "<templateName[0]>",
"filename": "core-site.xml"
@@ -48,7 +48,7 @@ var configs = [
},
{
"name": "fs.checkpoint.edits.dir",
- "templateName": ["fs_checkpoint_dir"],
+ "templateName": ["dfs_namenode_checkpoint_dir"],
"foreignKey": null,
"value": "<templateName[0]>",
"filename": "core-site.xml"
@@ -103,8 +103,8 @@ var configs = [
},
/**********************************************hdfs-site***************************************/
{
- "name": "dfs.name.dir",
- "templateName": ["dfs_name_dir"],
+ "name": "dfs.namenode.name.dir",
+ "templateName": ["dfs_namenode_name_dir"],
"foreignKey": null,
"value": "<templateName[0]>",
"filename": "hdfs-site.xml"
@@ -131,8 +131,8 @@ var configs = [
"filename": "hdfs-site.xml"
},
{
- "name": "dfs.data.dir",
- "templateName": ["dfs_data_dir"],
+ "name": "dfs.datanode.data.dir",
+ "templateName": ["dfs_datanode_data_dir"],
"foreignKey": null,
"value": "<templateName[0]>",
"filename": "hdfs-site.xml"
@@ -173,7 +173,7 @@ var configs = [
"filename": "hdfs-site.xml"
},
{
- "name": "dfs.http.address",
+ "name": "dfs.namenode.http-address",
"templateName": ["namenode_host"],
"foreignKey": null,
"value": "<templateName[0]>:50070",
@@ -188,14 +188,14 @@ var configs = [
},
{
- "name": "dfs.secondary.http.address",
+ "name": "dfs.namenode.secondary.http-address",
"templateName": ["snamenode_host"],
"foreignKey": null,
"value": "<templateName[0]>:50090",
"filename": "hdfs-site.xml"
},
{
- "name": "dfs.https.address",
+    "name": "dfs.namenode.https-address",
"templateName": ["namenode_host"],
"foreignKey": null,
"value": "<templateName[0]>:50470",
@@ -361,19 +361,18 @@ var configs = [
},
{
"name": "mapreduce.map.memory.mb",
- "templateName": ["mapred_job_map_mem_mb"],
+ "templateName": ["mapreduce_map_memory_mb"],
"foreignKey": null,
"value": "<templateName[0]>",
"filename": "mapred-site.xml"
},
{
"name": "mapreduce.reduce.memory.mb",
- "templateName": ["mapred_job_red_mem_mb"],
+ "templateName": ["mapreduce_reduce_memory_mb"],
"foreignKey": null,
"value": "<templateName[0]>",
"filename": "mapred-site.xml"
},
- //io.sort.mb -> mapreduce.task.io.sort.mb
{
"name": "mapreduce.task.io.sort.mb",
"templateName": ["mapreduce_task_io_sort_mb"],
@@ -382,15 +381,15 @@ var configs = [
"filename": "mapred-site.xml"
},
{
- "name": "mapred.system.dir",
- "templateName": ["mapred_system_dir"],
+ "name": "mapreduce.jobtracker.system.dir",
+ "templateName": ["mapreduce_jobtracker_system_dir"],
"foreignKey": null,
"value": "<templateName[0]>",
"filename": "mapred-site.xml"
},
{
- "name": "mapred.cluster.reduce.memory.mb",
- "templateName": ["mapred_cluster_red_mem_mb"],
+ "name": "mapreduce.cluster.reducememory.mb",
+ "templateName": ["mapreduce_cluster_reducememory_mb"],
"foreignKey": null,
"value": "<templateName[0]>",
"filename": "mapred-site.xml"
@@ -417,8 +416,8 @@ var configs = [
"filename": "mapred-site.xml"
},
{
- "name": "mapred.local.dir",
- "templateName": ["mapred_local_dir"],
+ "name": "mapreduce.cluster.local.dir",
+ "templateName": ["mapreduce_cluster_local_dir"],
"foreignKey": null,
"value": "<templateName[0]>",
"filename": "mapred-site.xml"
@@ -431,8 +430,8 @@ var configs = [
"filename": "mapred-site.xml"
},
{
- "name": "mapred.tasktracker.map.tasks.maximum",
- "templateName": ["mapred_map_tasks_max"],
+ "name": "mapreduce.tasktracker.map.tasks.maximum",
+ "templateName": ["mapreduce_tasktracker_map_tasks_maximum"],
"foreignKey": null,
"value": "<templateName[0]>",
"filename": "mapred-site.xml"
@@ -466,7 +465,7 @@ var configs = [
"filename": "mapred-site.xml"
},
{
- "name": "mapred.healthChecker.script.path",
+ "name": "mapreduce.tasktracker.healthchecker.script.path",
"templateName": ["mapred_jobstatus_dir"],
"foreignKey": null,
"value": "<templateName[0]>",
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/28a4ae83/ambari-web/app/data/HDP2/config_properties.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/HDP2/config_properties.js b/ambari-web/app/data/HDP2/config_properties.js
index 302fa08..e922f29 100644
--- a/ambari-web/app/data/HDP2/config_properties.js
+++ b/ambari-web/app/data/HDP2/config_properties.js
@@ -93,7 +93,7 @@ module.exports =
},
{
"id": "puppet var",
- "name": "dfs_name_dir",
+ "name": "dfs_namenode_name_dir",
"displayName": "NameNode directories",
"description": "NameNode directories for HDFS to store the file system image",
"defaultValue": "",
@@ -153,7 +153,7 @@ module.exports =
},
{
"id": "puppet var",
- "name": "fs_checkpoint_dir",
+ "name": "dfs_namenode_checkpoint_dir",
"displayName": "SecondaryNameNode Checkpoint directory",
"description": "Directory on the local filesystem where the Secondary NameNode should store the temporary images to merge",
"defaultValue": "",
@@ -184,7 +184,7 @@ module.exports =
},
{
"id": "puppet var",
- "name": "dfs_data_dir",
+ "name": "dfs_datanode_data_dir",
"displayName": "DataNode directories",
"description": "DataNode directories for HDFS to store the data blocks",
"defaultValue": "",
@@ -264,7 +264,7 @@ module.exports =
},
{
"id": "puppet var",
- "name": "fs_checkpoint_period",
+ "name": "dfs_namenode_checkpoint_period",
"displayName": "HDFS Maximum Checkpoint Delay",
"description": "Maximum delay between two consecutive checkpoints for HDFS",
"defaultValue": "21600",
@@ -458,7 +458,7 @@ module.exports =
},
{
"id": "puppet var",
- "name": "mapred_local_dir",
+ "name": "mapreduce_cluster_local_dir",
"displayName": "MapReduce local directories",
"description": "Directories for MapReduce to store intermediate data files",
"defaultValue": "",
@@ -471,7 +471,7 @@ module.exports =
},
{
"id": "puppet var",
- "name": "mapred_cluster_red_mem_mb",
+ "name": "mapreduce_cluster_reducememory_mb",
"displayName": "Cluster's Reduce slot size (virtual memory)",
"description": "The virtual memory size of a single Reduce slot in the MapReduce framework",
"defaultValue": "2048",
@@ -483,7 +483,7 @@ module.exports =
},
{
"id": "puppet var",
- "name": "mapred_map_tasks_max",
+ "name": "mapreduce_tasktracker_map_tasks_maximum",
"displayName": "Number of Map slots per node",
"description": "Number of slots that Map tasks that run simultaneously can occupy on a TaskTracker",
"defaultValue": "4",
@@ -494,7 +494,7 @@ module.exports =
},
{
"id": "puppet var",
- "name": "mapred_job_map_mem_mb",
+ "name": "mapreduce_map_memory_mb",
"displayName": "Default virtual memory for a job's map-task",
"description": "Virtual memory for single Map task",
"defaultValue": "1536",
@@ -506,7 +506,7 @@ module.exports =
},
{
"id": "puppet var",
- "name": "mapred_job_red_mem_mb",
+ "name": "mapreduce_reduce_memory_mb",
"displayName": "Default virtual memory for a job's reduce-task",
"description": "Virtual memory for single Reduce task",
"defaultValue": "2048",
@@ -600,7 +600,7 @@ module.exports =
},
{
"id": "puppet var",
- "name": "mapred_system_dir",
+ "name": "mapreduce_jobtracker_system_dir",
"displayName": "MapReduce system directories",
"description": "",
"defaultValue": "/mapred/system",
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/28a4ae83/ambari-web/app/models/service_config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/service_config.js b/ambari-web/app/models/service_config.js
index 74c7670..3ec1f51 100644
--- a/ambari-web/app/models/service_config.js
+++ b/ambari-web/app/models/service_config.js
@@ -285,12 +285,16 @@ App.ServiceConfigProperty = Ember.Object.extend({
this.set('value', masterComponentHostsInDB.filterProperty('component', 'ZOOKEEPER_SERVER').mapProperty('hostName'));
break;
case 'dfs_name_dir':
+ case 'dfs_namenode_name_dir':
case 'dfs_data_dir':
+ case 'dfs_datanode_data_dir':
case 'yarn_nodemanager_local-dirs':
case 'mapred_local_dir':
+ case 'mapreduce_cluster_local_dir':
this.unionAllMountPoints(!isOnlyFirstOneNeeded, localDB);
break;
case 'fs_checkpoint_dir':
+ case 'dfs_namenode_checkpoint_dir':
case 'zk_data_dir':
case 'oozie_data_dir':
this.unionAllMountPoints(isOnlyFirstOneNeeded, localDB);
@@ -320,6 +324,7 @@ App.ServiceConfigProperty = Ember.Object.extend({
var temp = '';
var setOfHostNames = [];
switch (this.get('name')) {
+ case 'dfs_namenode_name_dir':
case 'dfs_name_dir':
var components = masterComponentHostsInDB.filterProperty('component', 'NAMENODE');
components.forEach(function (component) {
@@ -327,18 +332,21 @@ App.ServiceConfigProperty = Ember.Object.extend({
}, this);
break;
case 'fs_checkpoint_dir':
+ case 'dfs_namenode_checkpoint_dir':
var components = masterComponentHostsInDB.filterProperty('component', 'SECONDARY_NAMENODE');
components.forEach(function (component) {
setOfHostNames.push(component.hostName);
}, this);
break;
case 'dfs_data_dir':
+ case 'dfs_datanode_data_dir':
temp = slaveComponentHostsInDB.findProperty('componentName', 'DATANODE');
temp.hosts.forEach(function (host) {
setOfHostNames.push(host.hostName);
}, this);
break;
case 'mapred_local_dir':
+ case 'mapreduce_cluster_local_dir':
temp = slaveComponentHostsInDB.findProperty('componentName', 'TASKTRACKER') || slaveComponentHostsInDB.findProperty('componentName', 'NODEMANAGER');
temp.hosts.forEach(function (host) {
setOfHostNames.push(host.hostName);