You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by ma...@apache.org on 2013/04/12 01:28:07 UTC
svn commit: r1467140 - in /incubator/ambari/trunk/ambari-server/src:
main/resources/stacks/HDP/1.3.0/services/GANGLIA/configuration/
main/resources/stacks/HDP/1.3.0/services/HBASE/configuration/
main/resources/stacks/HDP/1.3.0/services/HCATALOG/configu...
Author: mahadev
Date: Thu Apr 11 23:28:06 2013
New Revision: 1467140
URL: http://svn.apache.org/r1467140
Log:
AMBARI-1881. API to map global properties to services is partially complete.
Added:
incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HUE/configuration/global.xml
incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/NAGIOS/configuration/
incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/NAGIOS/configuration/global.xml
incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/OOZIE/configuration/global.xml
incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/ZOOKEEPER/configuration/
incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/ZOOKEEPER/configuration/global.xml
incubator/ambari/trunk/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java.orig
incubator/ambari/trunk/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/global.xml
Modified:
incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/GANGLIA/configuration/global.xml
incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HBASE/configuration/global.xml
incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HCATALOG/configuration/global.xml
incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HDFS/configuration/global.xml
incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HIVE/configuration/global.xml
incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/MAPREDUCE/configuration/global.xml
incubator/ambari/trunk/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
Modified: incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/GANGLIA/configuration/global.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/GANGLIA/configuration/global.xml?rev=1467140&r1=1467139&r2=1467140&view=diff
==============================================================================
--- incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/GANGLIA/configuration/global.xml (original)
+++ incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/GANGLIA/configuration/global.xml Thu Apr 11 23:28:06 2013
@@ -41,6 +41,11 @@
<value>nobody</value>
<description>User </description>
</property>
+ <property>
+ <name>gmond_user</name>
+ <value>nobody</value>
+ <description>User </description>
+ </property>
<property>
<name>rrdcached_base_dir</name>
<value>/var/lib/ganglia/rrds</value>
Modified: incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HBASE/configuration/global.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HBASE/configuration/global.xml?rev=1467140&r1=1467139&r2=1467140&view=diff
==============================================================================
--- incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HBASE/configuration/global.xml (original)
+++ incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HBASE/configuration/global.xml Thu Apr 11 23:28:06 2013
@@ -87,6 +87,11 @@
<description>HBase Region Block Multiplier</description>
</property>
<property>
+ <name>hregion_memstoreflushsize</name>
+ <value></value>
+ <description>HBase Region MemStore Flush Size.</description>
+ </property>
+ <property>
<name>client_scannercaching</name>
<value>100</value>
<description>Base Client Scanner Caching</description>
@@ -101,6 +106,11 @@
<value>10485760</value>
<description>HBase Client Maximum key-value Size</description>
</property>
+ <property>
+ <name>hbase_hdfs_root_dir</name>
+ <value>/apps/hbase/data</value>
+ <description>HBase Relative Path to HDFS.</description>
+ </property>
<property>
<name>hbase_tmp_dir</name>
<value>/var/log/hbase</value>
@@ -111,5 +121,50 @@
<value>/etc/hbase</value>
<description>Config Directory for HBase.</description>
</property>
-
+ <property>
+ <name>hdfs_enable_shortcircuit_read</name>
+ <value>true</value>
+ <description>HDFS Short Circuit Read</description>
+ </property>
+ <property>
+ <name>hdfs_enable_shortcircuit_skipchecksum</name>
+ <value>false</value>
+ <description>HDFS shortcircuit skip checksum.</description>
+ </property>
+ <property>
+ <name>hdfs_support_append</name>
+ <value>true</value>
+ <description>HDFS append support</description>
+ </property>
+ <property>
+ <name>hstore_blockingstorefiles</name>
+ <value>7</value>
+ <description>HStore blocking storefiles.</description>
+ </property>
+ <property>
+ <name>regionserver_memstore_lab</name>
+ <value>true</value>
+ <description>Region Server memstore.</description>
+ </property>
+ <property>
+ <name>regionserver_memstore_lowerlimit</name>
+ <value>0.35</value>
+ <description>Region Server memstore lower limit.</description>
+ </property>
+ <property>
+ <name>regionserver_memstore_upperlimit</name>
+ <value>0.4</value>
+ <description>Region Server memstore upper limit.</description>
+ </property>
+ <property>
+ <name>hbase_conf_dir</name>
+ <value>/etc/hbase</value>
+ <description>HBase conf dir.</description>
+ </property>
+ <property>
+ <name>hbase_user</name>
+ <value>hbase</value>
+ <description>HBase User Name.</description>
+ </property>
+
</configuration>
Modified: incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HCATALOG/configuration/global.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HCATALOG/configuration/global.xml?rev=1467140&r1=1467139&r2=1467140&view=diff
==============================================================================
--- incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HCATALOG/configuration/global.xml (original)
+++ incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HCATALOG/configuration/global.xml Thu Apr 11 23:28:06 2013
@@ -21,4 +21,25 @@
-->
<configuration>
+ <property>
+ <name>hcat_log_dir</name>
+ <value>/var/log/webhcat</value>
+ <description>WebHCat Log Dir.</description>
+ </property>
+ <property>
+ <name>hcat_pid_dir</name>
+ <value>/etc/run/webhcat</value>
+ <description>WebHCat Pid Dir.</description>
+ </property>
+ <property>
+ <name>hcat_user</name>
+ <value>hcat</value>
+ <description>HCat User.</description>
+ </property>
+ <property>
+ <name>webhcat_user</name>
+ <value>hcat</value>
+ <description>WebHCat User.</description>
+ </property>
+
</configuration>
Modified: incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HDFS/configuration/global.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HDFS/configuration/global.xml?rev=1467140&r1=1467139&r2=1467140&view=diff
==============================================================================
--- incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HDFS/configuration/global.xml (original)
+++ incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HDFS/configuration/global.xml Thu Apr 11 23:28:06 2013
@@ -112,6 +112,47 @@
<description>FS Checkpoint Size.</description>
</property>
<property>
+ <name>proxyuser_group</name>
+ <value>users</value>
+ <description>Proxy user group.</description>
+ </property>
+ <property>
+ <name>dfs_exclude</name>
+ <value></value>
+ <description>HDFS Exclude hosts.</description>
+ </property>
+ <property>
+ <name>dfs_include</name>
+ <value></value>
+ <description>HDFS Include hosts.</description>
+ </property>
+ <property>
+ <name>dfs_replication</name>
+ <value>3</value>
+ <description>Default Block Replication.</description>
+ </property>
+ <property>
+ <name>dfs_block_local_path_access_user</name>
+ <value>hbase</value>
+ <description>User allowed block local path access.</description>
+ </property>
+ <property>
+ <name>dfs_datanode_address</name>
+ <value>50010</value>
+ <description>Port for datanode address.</description>
+ </property>
+ <property>
+ <name>dfs_datanode_http_address</name>
+ <value>50075</value>
+ <description>Port for datanode http address.</description>
+ </property>
+ <property>
+ <name>dfs_datanode_data_dir_perm</name>
+ <value>750</value>
+ <description>Datanode dir perms.</description>
+ </property>
+
+ <property>
<name>security_enabled</name>
<value>false</value>
<description>Hadoop Security</description>
@@ -122,14 +163,30 @@
<description>Kerberos realm.</description>
</property>
<property>
- <name>kerberos_domain</name>
- <value>EXAMPLE.COM</value>
- <description>Kerberos realm.</description>
+ <name>kadmin_pw</name>
+ <value></value>
+ <description>Kerberos realm admin password</description>
</property>
<property>
<name>keytab_path</name>
<value>/etc/security/keytabs</value>
+ <description>Kerberos keytab path.</description>
+ </property>
+
+ <property>
+ <name>keytab_path</name>
+ <value>/etc/security/keytabs</value>
<description>KeyTab Directory.</description>
</property>
+ <property>
+ <name>namenode_formatted_mark_dir</name>
+ <value>/var/run/hadoop/hdfs/namenode/formatted/</value>
+ <description>Formatted Mark Directory.</description>
+ </property>
+ <property>
+ <name>hdfs_user</name>
+ <value>hdfs</value>
+ <description>User and Groups.</description>
+ </property>
</configuration>
Modified: incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HIVE/configuration/global.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HIVE/configuration/global.xml?rev=1467140&r1=1467139&r2=1467140&view=diff
==============================================================================
--- incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HIVE/configuration/global.xml (original)
+++ incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HIVE/configuration/global.xml Thu Apr 11 23:28:06 2013
@@ -26,27 +26,72 @@
<value></value>
<description>Hive Metastore host.</description>
</property>
- <property>
- <name>hivemetastore_host</name>
+ <property>
+ <name>hive_database</name>
<value></value>
- <description>Hive Metastore host.</description>
+ <description>Hive database name.</description>
</property>
<property>
- <name>hive_database</name>
+ <name>hive_existing_mysql_database</name>
<value></value>
<description>Hive database name.</description>
</property>
<property>
- <name>hive_existing_database</name>
+ <name>hive_existing_mysql_host</name>
+ <value></value>
+ <description></description>
+ </property>
+ <property>
+ <name>hive_existing_oracle_database</name>
<value></value>
<description>Hive database name.</description>
</property>
<property>
- <name>hive_existing_host</name>
+ <name>hive_existing_oracle_host</name>
<value></value>
<description></description>
</property>
<property>
+ <name>hive_ambari_database</name>
+ <value>MySQL</value>
+ <description>Database type.</description>
+ </property>
+ <property>
+ <name>hive_ambari_host</name>
+ <value></value>
+ <description>Database hostname.</description>
+ </property>
+ <property>
+ <name>hive_database_name</name>
+ <value></value>
+ <description>Database name.</description>
+ </property>
+ <property>
+ <name>hive_metastore_user_name</name>
+ <value>hive</value>
+ <description>Database username to use to connect to the database.</description>
+ </property>
+ <property>
+ <name>hive_metastore_user_passwd</name>
+ <value></value>
+ <description>Database password to use to connect to the database.</description>
+ </property>
+ <property>
+ <name>hive_metastore_port</name>
+ <value>9083</value>
+ <description>Hive Metastore port.</description>
+ </property>
+ <property>
+ <name>hive_lib</name>
+ <value>/usr/lib/hive/lib/</value>
+ <description>Hive Library.</description>
+ </property>
+ <property>
+ <name>hive_dbroot</name>
+ <value>/usr/lib/hive/lib/</value>
+ <description>Hive DB Directory.</description>
+ </property>
+ <property>
<name>hive_conf_dir</name>
<value>/etc/hive/conf</value>
<description>Hive Conf Dir.</description>
@@ -71,5 +116,10 @@
<value>/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar</value>
<description>Hive auxiliary jar path.</description>
</property>
+ <property>
+ <name>hive_user</name>
+ <value>hive</value>
+ <description>Hive User.</description>
+ </property>
</configuration>
Added: incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HUE/configuration/global.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HUE/configuration/global.xml?rev=1467140&view=auto
==============================================================================
--- incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HUE/configuration/global.xml (added)
+++ incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/HUE/configuration/global.xml Thu Apr 11 23:28:06 2013
@@ -0,0 +1,35 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+ <property>
+ <name>hue_pid_dir</name>
+ <value>/var/run/hue</value>
+ <description>Hue Pid Dir.</description>
+ </property>
+ <property>
+ <name>hue_log_dir</name>
+ <value>/var/log/hue</value>
+ <description>Hue Log Dir.</description>
+ </property>
+
+</configuration>
Modified: incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/MAPREDUCE/configuration/global.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/MAPREDUCE/configuration/global.xml?rev=1467140&r1=1467139&r2=1467140&view=diff
==============================================================================
--- incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/MAPREDUCE/configuration/global.xml (original)
+++ incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/MAPREDUCE/configuration/global.xml Thu Apr 11 23:28:06 2013
@@ -49,12 +49,112 @@
<property>
<name>jtnode_opt_newsize</name>
<value>200</value>
- <description>MapRed Capacity Scheduler.</description>
+ <description>Mem New Size.</description>
</property>
<property>
<name>jtnode_opt_maxnewsize</name>
<value>200</value>
- <description>MapRed Capacity Scheduler.</description>
+ <description>Max New size.</description>
+ </property>
+ <property>
+ <name>hadoop_heapsize</name>
+ <value>1024</value>
+ <description>Hadoop maximum Java heap size</description>
+ </property>
+ <property>
+ <name>jtnode_heapsize</name>
+ <value>1024</value>
+ <description>Maximum Java heap size for JobTracker in MB (Java option -Xmx)</description>
+ </property>
+ <property>
+ <name>mapred_map_tasks_max</name>
+ <value>4</value>
+ <description>Number of slots that Map tasks that run simultaneously can occupy on a TaskTracker</description>
+ </property>
+ <property>
+ <name>mapred_red_tasks_max</name>
+ <value>2</value>
+ <description>Number of slots that Reduce tasks that run simultaneously can occupy on a TaskTracker</description>
+ </property>
+ <property>
+ <name>mapred_cluster_map_mem_mb</name>
+ <value>-1</value>
+ <description>The virtual memory size of a single Map slot in the MapReduce framework</description>
+ </property>
+ <property>
+ <name>mapred_cluster_red_mem_mb</name>
+ <value>-1</value>
+ <description>The virtual memory size of a single Reduce slot in the MapReduce framework</description>
+ </property>
+ <property>
+ <name>mapred_job_map_mem_mb</name>
+ <value>-1</value>
+ <description>Virtual memory for single Map task</description>
+ </property>
+ <property>
+ <name>mapred_child_java_opts_sz</name>
+ <value>768</value>
+ <description>Java options for the TaskTracker child processes.</description>
+ </property>
+ <property>
+ <name>io_sort_mb</name>
+ <value>200</value>
+ <description>The total amount of Map-side buffer memory to use while sorting files (Expert-only configuration).</description>
+ </property>
+ <property>
+ <name>io_sort_spill_percent</name>
+ <value>0.9</value>
+ <description>Percentage of sort buffer used for record collection (Expert-only configuration).</description>
+ </property>
+ <property>
+ <name>mapreduce_userlog_retainhours</name>
+ <value>24</value>
+ <description>The maximum time, in hours, for which the user-logs are to be retained after the job completion.</description>
+ </property>
+ <property>
+ <name>maxtasks_per_job</name>
+ <value>-1</value>
+ <description>Maximum number of tasks for a single Job</description>
+ </property>
+ <property>
+ <name>lzo_enabled</name>
+ <value>false</value>
+ <description>LZO compression enabled</description>
+ </property>
+ <property>
+ <name>snappy_enabled</name>
+ <value>true</value>
+ <description>Snappy compression enabled</description>
+ </property>
+ <property>
+ <name>rca_enabled</name>
+ <value>true</value>
+ <description>Enable Job Diagnostics.</description>
+ </property>
+ <property>
+ <name>mapred_hosts_exclude</name>
+ <value></value>
+ <description>Exclude entered hosts</description>
+ </property>
+ <property>
+ <name>mapred_hosts_include</name>
+ <value></value>
+ <description>Include entered hosts</description>
+ </property>
+ <property>
+ <name>mapred_jobstatus_dir</name>
+ <value>file:////mapred/jobstatus</value>
+ <description>Job Status directory</description>
+ </property>
+ <property>
+ <name>task_controller</name>
+ <value>org.apache.hadoop.mapred.DefaultTaskController</value>
+ <description>Task Controller.</description>
+ </property>
+ <property>
+ <name>mapred_user</name>
+ <value>mapred</value>
+ <description>MapReduce User.</description>
</property>
</configuration>
Added: incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/NAGIOS/configuration/global.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/NAGIOS/configuration/global.xml?rev=1467140&view=auto
==============================================================================
--- incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/NAGIOS/configuration/global.xml (added)
+++ incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/NAGIOS/configuration/global.xml Thu Apr 11 23:28:06 2013
@@ -0,0 +1,50 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+ <property>
+ <name>nagios_user</name>
+ <value>nagios</value>
+ <description>Nagios Username.</description>
+ </property>
+ <property>
+ <name>nagios_group</name>
+ <value>nagios</value>
+ <description>Nagios Group.</description>
+ </property>
+ <property>
+ <name>nagios_web_login</name>
+ <value>nagiosadmin</value>
+ <description>Nagios web user.</description>
+ </property>
+ <property>
+ <name>nagios_web_password</name>
+ <value></value>
+ <description>Nagios Admin Password.</description>
+ </property>
+ <property>
+ <name>nagios_contact</name>
+ <value></value>
+ <description>Hadoop Admin Email.</description>
+ </property>
+
+</configuration>
Added: incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/OOZIE/configuration/global.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/OOZIE/configuration/global.xml?rev=1467140&view=auto
==============================================================================
--- incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/OOZIE/configuration/global.xml (added)
+++ incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/OOZIE/configuration/global.xml Thu Apr 11 23:28:06 2013
@@ -0,0 +1,105 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+ <property>
+ <name>oozie_user</name>
+ <value>oozie</value>
+ <description>Oozie User.</description>
+ </property>
+ <property>
+ <name>oozieserver_host</name>
+ <value></value>
+ <description>Oozie Server Host.</description>
+ </property>
+ <property>
+ <name>oozie_database</name>
+ <value></value>
+ <description>Oozie Server Database.</description>
+ </property>
+ <property>
+ <name>oozie_derby_database</name>
+ <value>Derby</value>
+ <description>Oozie Derby Database.</description>
+ </property>
+ <property>
+ <name>oozie_existing_mysql_database</name>
+ <value>MySQL</value>
+ <description>Oozie MySQL Database.</description>
+ </property>
+ <property>
+ <name>oozie_existing_mysql_host</name>
+ <value></value>
+ <description>Existing MySQL Host.</description>
+ </property>
+ <property>
+ <name>oozie_existing_oracle_database</name>
+ <value>Oracle</value>
+ <description>Oracle Database</description>
+ </property>
+ <property>
+ <name>oozie_existing_oracle_host</name>
+ <value></value>
+ <description>Database Host.</description>
+ </property>
+ <property>
+ <name>oozie_ambari_database</name>
+ <value>MySQL</value>
+ <description>Database default.</description>
+ </property>
+ <property>
+ <name>oozie_ambari_host</name>
+ <value></value>
+ <description>Host on which database will be created.</description>
+ </property>
+ <property>
+ <name>oozie_database_name</name>
+ <value>oozie</value>
+ <description>Database name used for the Oozie.</description>
+ </property>
+ <property>
+ <name>oozie_metastore_user_name</name>
+ <value>oozie</value>
+ <description>Database user name to use to connect to the database</description>
+ </property>
+ <property>
+ <name>oozie_metastore_user_passwd</name>
+ <value></value>
+ <description>Database password to use to connect to the database</description>
+ </property>
+ <property>
+ <name>oozie_data_dir</name>
+ <value>/hadoop/oozie/data</value>
+ <description>Data directory in which the Oozie DB exists</description>
+ </property>
+ <property>
+ <name>oozie_log_dir</name>
+ <value>/var/log/oozie</value>
+ <description>Directory for oozie logs</description>
+ </property>
+ <property>
+ <name>oozie_pid_dir</name>
+ <value>/var/run/oozie</value>
+ <description>Directory in which the pid files for oozie reside.</description>
+ </property>
+
+</configuration>
Added: incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/ZOOKEEPER/configuration/global.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/ZOOKEEPER/configuration/global.xml?rev=1467140&view=auto
==============================================================================
--- incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/ZOOKEEPER/configuration/global.xml (added)
+++ incubator/ambari/trunk/ambari-server/src/main/resources/stacks/HDP/1.3.0/services/ZOOKEEPER/configuration/global.xml Thu Apr 11 23:28:06 2013
@@ -0,0 +1,75 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+ <property>
+ <name>zk_user</name>
+ <value>zookeeper</value>
+ <description>ZooKeeper User.</description>
+ </property>
+ <property>
+ <name>zookeeperserver_host</name>
+ <value></value>
+ <description>ZooKeeper Server Hosts.</description>
+ </property>
+ <property>
+ <name>zk_data_dir</name>
+ <value>/hadoop/zookeeper</value>
+ <description>Data directory for ZooKeeper.</description>
+ </property>
+ <property>
+ <name>zk_log_dir</name>
+ <value>/var/log/zookeeper</value>
+ <description>ZooKeeper Log Dir</description>
+ </property>
+ <property>
+ <name>zk_pid_dir</name>
+ <value>/var/run/zookeeper</value>
+ <description>ZooKeeper Pid Dir</description>
+ </property>
+ <property>
+ <name>zk_pid_file</name>
+ <value>/var/run/zookeeper/zookeeper_server.pid</value>
+ <description>ZooKeeper Pid File</description>
+ </property>
+ <property>
+ <name>tickTime</name>
+ <value>2000</value>
+ <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
+ </property>
+ <property>
+ <name>initLimit</name>
+ <value>10</value>
+ <description>Ticks to allow for sync at Init.</description>
+ </property>
+ <property>
+ <name>syncLimit</name>
+ <value>5</value>
+ <description>Ticks to allow for sync at Runtime.</description>
+ </property>
+ <property>
+ <name>clientPort</name>
+ <value>2181</value>
+ <description>Port for running ZK Server.</description>
+ </property>
+
+</configuration>
Modified: incubator/ambari/trunk/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java?rev=1467140&r1=1467139&r2=1467140&view=diff
==============================================================================
--- incubator/ambari/trunk/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java (original)
+++ incubator/ambari/trunk/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java Thu Apr 11 23:28:06 2013
@@ -260,6 +260,17 @@ public class AmbariMetaInfoTest {
}
}
Assert.assertTrue(checkforglobal);
+ sinfo = metaInfo.getServiceInfo("HDP",
+ "0.2", "MAPREDUCE");
+ boolean checkforhadoopheapsize = false;
+ pinfo = sinfo.getProperties();
+ for (PropertyInfo pinfol: pinfo) {
+ if ("global.xml".equals(pinfol.getFilename())) {
+ if ("hadoop_heapsize".equals(pinfol.getName()))
+ checkforhadoopheapsize = true;
+ }
+ }
+ Assert.assertTrue(checkforhadoopheapsize);
}
@Test
Added: incubator/ambari/trunk/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java.orig
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java.orig?rev=1467140&view=auto
==============================================================================
--- incubator/ambari/trunk/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java.orig (added)
+++ incubator/ambari/trunk/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java.orig Thu Apr 11 23:28:06 2013
@@ -0,0 +1,405 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.api.services;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import junit.framework.Assert;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.StackAccessException;
+import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.OperatingSystemInfo;
+import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.ambari.server.state.RepositoryInfo;
+import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.Stack;
+import org.apache.ambari.server.state.StackInfo;
+import org.apache.commons.io.FileUtils;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class AmbariMetaInfoTest {
+
+ private static String STACK_NAME_HDP = "HDP";
+ private static String STACK_VERSION_HDP = "0.1";
+ private static final String STACK_MINIMAL_VERSION_HDP = "0.0";
+ private static String SERVICE_NAME_HDFS = "HDFS";
+ private static String SERVICE_COMPONENT_NAME = "NAMENODE";
+ private static final String OS_TYPE = "centos5";
+ private static final String REPO_ID = "HDP-UTILS-1.1.0.15";
+ private static final String PROPERTY_NAME = "hbase.regionserver.msginterval";
+
+ private static final String NON_EXT_VALUE = "XXX";
+
+ private static final int REPOS_CNT = 3;
+ private static final int STACKS_NAMES_CNT = 1;
+ private static final int PROPERTIES_CNT = 63;
+ private static final int OS_CNT = 3;
+
+ private AmbariMetaInfo metaInfo = null;
+ private final static Logger LOG =
+ LoggerFactory.getLogger(AmbariMetaInfoTest.class);
+
+
+ @Rule
+ public TemporaryFolder tmpFolder = new TemporaryFolder();
+
+ @Before
+ public void before() throws Exception {
+ File stackRoot = new File("src/test/resources/stacks");
+ LOG.info("Stacks file " + stackRoot.getAbsolutePath());
+ metaInfo = new AmbariMetaInfo(stackRoot, new File("../version"));
+ try {
+ metaInfo.init();
+ } catch(Exception e) {
+ LOG.info("Error in initializing ", e);
+ }
+ }
+
+ @Test
+ public void getComponentCategory() throws AmbariException {
+ ComponentInfo componentInfo = metaInfo.getComponentCategory(STACK_NAME_HDP,
+ STACK_VERSION_HDP, SERVICE_NAME_HDFS, SERVICE_COMPONENT_NAME);
+ assertNotNull(componentInfo);
+ componentInfo = metaInfo.getComponentCategory(STACK_NAME_HDP,
+ STACK_VERSION_HDP, SERVICE_NAME_HDFS, "DATANODE1");
+ Assert.assertNotNull(componentInfo);
+ assertTrue(!componentInfo.isClient());
+ }
+
+ @Test
+ public void getComponentsByService() throws AmbariException {
+ List<ComponentInfo> components = metaInfo.getComponentsByService(
+ STACK_NAME_HDP, STACK_VERSION_HDP, SERVICE_NAME_HDFS);
+ assertNotNull(components);
+ }
+
+ @Test
+ public void getRepository() throws AmbariException {
+ Map<String, List<RepositoryInfo>> repository = metaInfo.getRepository(
+ STACK_NAME_HDP, STACK_VERSION_HDP);
+ assertNotNull(repository);
+ assertFalse(repository.get("centos5").isEmpty());
+ assertFalse(repository.get("centos6").isEmpty());
+ }
+
+ @Test
+ public void isSupportedStack() throws AmbariException {
+ boolean supportedStack = metaInfo.isSupportedStack(STACK_NAME_HDP,
+ STACK_VERSION_HDP);
+ assertTrue(supportedStack);
+
+ boolean notSupportedStack = metaInfo.isSupportedStack(NON_EXT_VALUE,
+ NON_EXT_VALUE);
+ assertFalse(notSupportedStack);
+ }
+
+ @Test
+ public void isValidService() throws AmbariException {
+ boolean valid = metaInfo.isValidService(STACK_NAME_HDP, STACK_VERSION_HDP,
+ SERVICE_NAME_HDFS);
+ assertTrue(valid);
+
+ boolean invalid = metaInfo.isValidService(STACK_NAME_HDP, NON_EXT_VALUE, NON_EXT_VALUE);
+ assertFalse(invalid);
+ }
+
+ /**
+ * Method: getSupportedConfigs(String stackName, String version, String
+ * serviceName)
+ */
+ @Test
+ public void getSupportedConfigs() throws Exception {
+
+ Map<String, Map<String, String>> configsAll = metaInfo.getSupportedConfigs(
+ STACK_NAME_HDP, STACK_VERSION_HDP, SERVICE_NAME_HDFS);
+ Set<String> filesKeys = configsAll.keySet();
+ for (String file : filesKeys) {
+ Map<String, String> configs = configsAll.get(file);
+ Set<String> propertyKeys = configs.keySet();
+ assertNotNull(propertyKeys);
+ assertNotSame(propertyKeys.size(), 0);
+ }
+ }
+
+ @Test
+ public void testServiceNameUsingComponentName() throws AmbariException {
+ String serviceName = metaInfo.getComponentToService(STACK_NAME_HDP,
+ STACK_VERSION_HDP, "NAMENODE");
+ assertTrue("HDFS".equals(serviceName));
+ }
+
+ /**
+ * Method: Map<String, ServiceInfo> getServices(String stackName, String
+ * version, String serviceName)
+ * @throws AmbariException
+ */
+ @Test
+ public void getServices() throws AmbariException {
+ Map<String, ServiceInfo> services = metaInfo.getServices(STACK_NAME_HDP,
+ STACK_VERSION_HDP);
+ LOG.info("Getting all the services ");
+ for (Map.Entry<String, ServiceInfo> entry : services.entrySet()) {
+ LOG.info("Service Name " + entry.getKey() + " values " + entry.getValue());
+ }
+ assertTrue(services.containsKey("HDFS"));
+ assertTrue(services.containsKey("MAPREDUCE"));
+ assertNotNull(services);
+ assertNotSame(services.keySet().size(), 0);
+ }
+
+ /**
+ * Method: getServiceInfo(String stackName, String version, String
+ * serviceName)
+ */
+ @Test
+ public void getServiceInfo() throws Exception {
+ ServiceInfo si = metaInfo.getServiceInfo(STACK_NAME_HDP, STACK_VERSION_HDP,
+ SERVICE_NAME_HDFS);
+ assertNotNull(si);
+ }
+
+ /**
+ * Method: getSupportedServices(String stackName, String version)
+ */
+ @Test
+ public void getSupportedServices() throws Exception {
+ List<ServiceInfo> services = metaInfo.getSupportedServices(STACK_NAME_HDP,
+ STACK_VERSION_HDP);
+ assertNotNull(services);
+ assertNotSame(services.size(), 0);
+
+ }
+
+ @Test
+ public void testGetRepos() throws Exception {
+ Map<String, List<RepositoryInfo>> repos = metaInfo.getRepository(
+ STACK_NAME_HDP, STACK_VERSION_HDP);
+ Set<String> centos5Cnt = new HashSet<String>();
+ Set<String> centos6Cnt = new HashSet<String>();
+ Set<String> redhat6cnt = new HashSet<String>();
+
+ for (List<RepositoryInfo> vals : repos.values()) {
+ for (RepositoryInfo repo : vals) {
+ LOG.debug("Dumping repo info : " + repo.toString());
+ if (repo.getOsType().equals("centos5")) {
+ centos5Cnt.add(repo.getRepoId());
+ } else if (repo.getOsType().equals("centos6")) {
+ centos6Cnt.add(repo.getRepoId());
+ } else if (repo.getOsType().equals("redhat6")) {
+ redhat6cnt.add(repo.getRepoId());
+ } else {
+ fail("Found invalid os" + repo.getOsType());
+ }
+
+ if (repo.getRepoId().equals("epel")) {
+ assertFalse(repo.getMirrorsList().isEmpty());
+ assertNull(repo.getBaseUrl());
+ } else {
+ assertNull(repo.getMirrorsList());
+ assertFalse(repo.getBaseUrl().isEmpty());
+ }
+ }
+ }
+
+ assertEquals(3, centos5Cnt.size());
+ assertEquals(3, redhat6cnt.size());
+ assertEquals(3, centos6Cnt.size());
+ }
+
+
+ @Test
+ /**
+ * Make sure global mapping is available when global.xml is
+ * in the path.
+ * @throws Exception
+ */
+ public void testGlobalMapping() throws Exception {
+ ServiceInfo sinfo = metaInfo.getServiceInfo("HDP",
+ "0.2", "HDFS");
+ List<PropertyInfo> pinfo = sinfo.getProperties();
+ /** check all the config knobs and make sure the global one is there **/
+ boolean checkforglobal = false;
+
+ for (PropertyInfo pinfol: pinfo) {
+ if ("global.xml".equals(pinfol.getFilename())) {
+ checkforglobal = true;
+ }
+ }
+ Assert.assertTrue(checkforglobal);
+ }
+
+ @Test
+ public void testMetaInfoFileFilter() throws Exception {
+ String buildDir = tmpFolder.getRoot().getAbsolutePath();
+ File stackRoot = new File("src/test/resources/stacks");
+ File stackRootTmp = new File(buildDir + "/ambari-metaInfo"); stackRootTmp.mkdir();
+ FileUtils.copyDirectory(stackRoot, stackRootTmp);
+ AmbariMetaInfo ambariMetaInfo = new AmbariMetaInfo(stackRootTmp, new File("../version"));
+ File f1, f2, f3;
+ f1 = new File(stackRootTmp.getAbsolutePath() + "/001.svn"); f1.createNewFile();
+ f2 = new File(stackRootTmp.getAbsolutePath() + "/abcd.svn/001.svn"); f2.mkdirs(); f2.createNewFile();
+ f3 = new File(stackRootTmp.getAbsolutePath() + "/.svn");
+ if (!f3.exists()) {
+ f3.createNewFile();
+ }
+ ambariMetaInfo.init();
+ // Tests the stack is loaded as expected
+ getServices();
+ getComponentsByService();
+ getComponentCategory();
+ getSupportedConfigs();
+ // Check .svn is not part of the stack but abcd.svn is
+ Assert.assertNotNull(ambariMetaInfo.getStackInfo("abcd.svn", "001.svn"));
+
+ Assert.assertFalse(ambariMetaInfo.isSupportedStack(".svn", ""));
+ Assert.assertFalse(ambariMetaInfo.isSupportedStack(".svn", ""));
+ }
+
+ @Test
+ public void testGetComponent() throws Exception {
+ ComponentInfo component = metaInfo.getComponent(STACK_NAME_HDP,
+ STACK_VERSION_HDP, SERVICE_NAME_HDFS, SERVICE_COMPONENT_NAME);
+ Assert.assertEquals(component.getName(), SERVICE_COMPONENT_NAME);
+
+ try {
+ metaInfo.getComponent(STACK_NAME_HDP,
+ STACK_VERSION_HDP, SERVICE_NAME_HDFS, NON_EXT_VALUE);
+ } catch (StackAccessException e) {
+ Assert.assertTrue(e instanceof StackAccessException);
+ }
+
+ }
+
+ @Test
+ public void testGetRepositories() throws Exception {
+ List<RepositoryInfo> repositories = metaInfo.getRepositories(STACK_NAME_HDP, STACK_VERSION_HDP, OS_TYPE);
+ Assert.assertEquals(repositories.size(), REPOS_CNT);
+ }
+
+ @Test
+ public void testGetRepository() throws Exception {
+ RepositoryInfo repository = metaInfo.getRepository(STACK_NAME_HDP, STACK_VERSION_HDP, OS_TYPE, REPO_ID);
+ Assert.assertEquals(repository.getRepoId(), REPO_ID);
+
+ try {
+ metaInfo.getRepository(STACK_NAME_HDP, STACK_VERSION_HDP, OS_TYPE, NON_EXT_VALUE);
+ } catch (StackAccessException e) {
+ Assert.assertTrue(e instanceof StackAccessException);
+ }
+ }
+
+ @Test
+ public void testGetService() throws Exception {
+ ServiceInfo service = metaInfo.getService(STACK_NAME_HDP, STACK_VERSION_HDP, SERVICE_NAME_HDFS);
+ Assert.assertEquals(service.getName(), SERVICE_NAME_HDFS);
+ try {
+ metaInfo.getService(STACK_NAME_HDP, STACK_VERSION_HDP, NON_EXT_VALUE);
+ } catch (StackAccessException e) {
+ Assert.assertTrue(e instanceof StackAccessException);
+ }
+
+ }
+
+ @Test
+ public void testGetStacksNames() throws Exception {
+ Set<Stack> stackNames = metaInfo.getStackNames();
+ assertEquals(stackNames.size(), STACKS_NAMES_CNT);
+ assertTrue(stackNames.contains(new Stack(STACK_NAME_HDP)));
+ }
+
+ @Test
+ public void testGetStack() throws Exception {
+ Stack stack = metaInfo.getStack(STACK_NAME_HDP);
+ Assert.assertEquals(stack.getStackName(), STACK_NAME_HDP);
+ try {
+ metaInfo.getStack(NON_EXT_VALUE);
+ } catch (StackAccessException e) {
+ Assert.assertTrue(e instanceof StackAccessException);
+ }
+ }
+
+ @Test
+ public void testGetStackInfo() throws Exception {
+ StackInfo stackInfo = metaInfo.getStackInfo(STACK_NAME_HDP, STACK_VERSION_HDP);
+ Assert.assertEquals(stackInfo.getName(), STACK_NAME_HDP);
+ Assert.assertEquals(stackInfo.getVersion(), STACK_VERSION_HDP);
+ Assert.assertEquals(stackInfo.getMinUpgradeVersion(), STACK_MINIMAL_VERSION_HDP);
+ try {
+ metaInfo.getStackInfo(STACK_NAME_HDP, NON_EXT_VALUE);
+ } catch (StackAccessException e) {
+ Assert.assertTrue(e instanceof StackAccessException);
+ }
+ }
+
+ @Test
+ public void testGetProperties() throws Exception {
+ Set<PropertyInfo> properties = metaInfo.getProperties(STACK_NAME_HDP, STACK_VERSION_HDP, SERVICE_NAME_HDFS);
+ Assert.assertEquals(properties.size(), PROPERTIES_CNT);
+ }
+
+ @Test
+ public void testGetProperty() throws Exception {
+ PropertyInfo property = metaInfo.getProperty(STACK_NAME_HDP, STACK_VERSION_HDP, SERVICE_NAME_HDFS, PROPERTY_NAME);
+ Assert.assertEquals(property.getName(), PROPERTY_NAME);
+
+ try {
+ metaInfo.getProperty(STACK_NAME_HDP, STACK_VERSION_HDP, SERVICE_NAME_HDFS, NON_EXT_VALUE);
+ } catch (StackAccessException e) {
+ Assert.assertTrue(e instanceof StackAccessException);
+ }
+
+ }
+
+ @Test
+ public void testGetOperatingSystems() throws Exception {
+ Set<OperatingSystemInfo> operatingSystems = metaInfo.getOperatingSystems(STACK_NAME_HDP, STACK_VERSION_HDP);
+ Assert.assertEquals(operatingSystems.size(), OS_CNT);
+ }
+
+ @Test
+ public void testGetOperatingSystem() throws Exception {
+ OperatingSystemInfo operatingSystem = metaInfo.getOperatingSystem(STACK_NAME_HDP, STACK_VERSION_HDP, OS_TYPE);
+ Assert.assertEquals(operatingSystem.getOsType(), OS_TYPE);
+
+
+ try {
+ metaInfo.getOperatingSystem(STACK_NAME_HDP, STACK_VERSION_HDP, NON_EXT_VALUE);
+ } catch (StackAccessException e) {
+ Assert.assertTrue(e instanceof StackAccessException);
+ }
+ }
+}
Added: incubator/ambari/trunk/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/global.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/global.xml?rev=1467140&view=auto
==============================================================================
--- incubator/ambari/trunk/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/global.xml (added)
+++ incubator/ambari/trunk/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/global.xml Thu Apr 11 23:28:06 2013
@@ -0,0 +1,160 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+ <property>
+ <name>jobtracker_host</name>
+ <value></value>
+ <description>JobTracker Host.</description>
+ </property>
+ <property>
+ <name>tasktracker_hosts</name>
+ <value></value>
+ <description>TaskTracker hosts.</description>
+ </property>
+ <property>
+ <name>mapred_local_dir</name>
+ <value>/hadoop/mapred</value>
+ <description>MapRed Local Directories.</description>
+ </property>
+ <property>
+ <name>mapred_system_dir</name>
+ <value>/mapred/system</value>
+ <description>MapRed System Directories.</description>
+ </property>
+ <property>
+ <name>scheduler_name</name>
+ <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
+ <description>MapRed Capacity Scheduler.</description>
+ </property>
+ <property>
+ <name>jtnode_opt_newsize</name>
+ <value>200</value>
+ <description>Mem New Size.</description>
+ </property>
+ <property>
+ <name>jtnode_opt_maxnewsize</name>
+ <value>200</value>
+ <description>Max New size.</description>
+ </property>
+ <property>
+ <name>hadoop_heapsize</name>
+ <value>1024</value>
+ <description>Hadoop maximum Java heap size</description>
+ </property>
+ <property>
+ <name>jtnode_heapsize</name>
+ <value>1024</value>
+ <description>Maximum Java heap size for JobTracker in MB (Java option -Xmx)</description>
+ </property>
+ <property>
+ <name>mapred_map_tasks_max</name>
+ <value>4</value>
+ <description>Number of slots that Map tasks that run simultaneously can occupy on a TaskTracker</description>
+ </property>
+ <property>
+ <name>mapred_red_tasks_max</name>
+ <value>2</value>
+ <description>Number of slots that Reduce tasks that run simultaneously can occupy on a TaskTracker</description>
+ </property>
+ <property>
+ <name>mapred_cluster_map_mem_mb</name>
+ <value>-1</value>
+ <description>The virtual memory size of a single Map slot in the MapReduce framework</description>
+ </property>
+ <property>
+ <name>mapred_cluster_red_mem_mb</name>
+ <value>-1</value>
+ <description>The virtual memory size of a single Reduce slot in the MapReduce framework</description>
+ </property>
+ <property>
+ <name>mapred_job_map_mem_mb</name>
+ <value>-1</value>
+ <description>Virtual memory for single Map task</description>
+ </property>
+ <property>
+ <name>mapred_child_java_opts_sz</name>
+ <value>768</value>
+ <description>Java options for the TaskTracker child processes.</description>
+ </property>
+ <property>
+ <name>io_sort_mb</name>
+ <value>200</value>
+ <description>The total amount of Map-side buffer memory to use while sorting files (Expert-only configuration).</description>
+ </property>
+ <property>
+ <name>io_sort_spill_percent</name>
+ <value>0.9</value>
+ <description>Percentage of sort buffer used for record collection (Expert-only configuration).</description>
+ </property>
+ <property>
+ <name>mapreduce_userlog_retainhours</name>
+ <value>24</value>
+ <description>The maximum time, in hours, for which the user-logs are to be retained after the job completion.</description>
+ </property>
+ <property>
+ <name>maxtasks_per_job</name>
+ <value>-1</value>
+ <description>Maximum number of tasks for a single Job</description>
+ </property>
+ <property>
+ <name>lzo_enabled</name>
+ <value>false</value>
+ <description>LZO compression enabled</description>
+ </property>
+ <property>
+ <name>snappy_enabled</name>
+ <value>true</value>
+ <description>Snappy compression enabled</description>
+ </property>
+ <property>
+ <name>rca_enabled</name>
+ <value>true</value>
+ <description>Enable Job Diagnostics.</description>
+ </property>
+ <property>
+ <name>mapred_hosts_exclude</name>
+ <value></value>
+ <description>Exclude entered hosts</description>
+ </property>
+ <property>
+ <name>mapred_hosts_include</name>
+ <value></value>
+ <description>Include entered hosts</description>
+ </property>
+ <property>
+ <name>mapred_jobstatus_dir</name>
+ <value>file:////mapred/jobstatus</value>
+ <description>Job Status directory</description>
+ </property>
+ <property>
+ <name>task_controller</name>
+ <value>org.apache.hadoop.mapred.DefaultTaskController</value>
+ <description>Task Controller.</description>
+ </property>
+ <property>
+ <name>mapred_user</name>
+ <value>mapred</value>
+ <description>MapReduce User.</description>
+ </property>
+
+</configuration>