Posted to commits@ambari.apache.org by tt...@apache.org on 2017/11/24 20:04:49 UTC
[1/2] ambari git commit: AMBARI-20891 - Allow extensions to auto-link with supported stack versions
Repository: ambari
Updated Branches:
refs/heads/branch-2.6 0e2e711e0 -> 7c56924a9
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hbase-site.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hbase-site.xml
new file mode 100644
index 0000000..5024e85
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hbase-site.xml
@@ -0,0 +1,137 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+ <property>
+ <name>hbase.regionserver.msginterval</name>
+ <value>1000</value>
+ <description>Interval between messages from the RegionServer to HMaster
+ in milliseconds. Default is 15. Set this value low if you want unit
+ tests to be responsive.
+ </description>
+ </property>
+ <property>
+ <name>hbase.client.pause</name>
+ <value>5000</value>
+ <description>General client pause value. Used mostly as value to wait
+ before running a retry of a failed get, region lookup, etc.</description>
+ </property>
+ <property>
+ <name>hbase.master.meta.thread.rescanfrequency</name>
+ <value>10000</value>
+ <description>How long the HMaster sleeps (in milliseconds) between scans of
+ the root and meta tables.
+ </description>
+ </property>
+ <property>
+ <name>hbase.server.thread.wakefrequency</name>
+ <value>1000</value>
+ <description>Time to sleep in between searches for work (in milliseconds).
+ Used as sleep interval by service threads such as META scanner and log roller.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.handler.count</name>
+ <value>5</value>
+ <description>Count of RPC Server instances spun up on RegionServers
+ Same property is used by the HMaster for count of master handlers.
+ Default is 10.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.lease.period</name>
+ <value>6000</value>
+ <description>Length of time the master will wait before timing out a region
+ server lease. Since region servers report in every second (see above), this
+ value has been reduced so that the master will notice a dead region server
+ sooner. The default is 30 seconds.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.info.port</name>
+ <value>-1</value>
+ <description>The port for the hbase master web UI
+ Set to -1 if you do not want the info server to run.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.info.port</name>
+ <value>-1</value>
+ <description>The port for the hbase regionserver web UI
+ Set to -1 if you do not want the info server to run.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.info.port.auto</name>
+ <value>true</value>
+ <description>Info server auto port bind. Enables automatic port
+ search if hbase.regionserver.info.port is already in use.
+ Enabled for testing to run multiple tests on one machine.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.lease.thread.wakefrequency</name>
+ <value>3000</value>
+ <description>The interval between checks for expired region server leases.
+ This value has been reduced due to the other reduced values above so that
+ the master will notice a dead region server sooner. The default is 15 seconds.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.optionalcacheflushinterval</name>
+ <value>10000</value>
+ <description>
+ Amount of time to wait since the last time a region was flushed before
+ invoking an optional cache flush. Default 60,000.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.safemode</name>
+ <value>false</value>
+ <description>
+ Turn on/off safe mode in region server. Always on for production, always off
+ for tests.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hregion.max.filesize</name>
+ <value>67108864</value>
+ <description>
+ Maximum desired file size for an HRegion. If filesize exceeds
+ value + (value / 2), the HRegion is split in two. Default: 256M.
+
+ Keep the maximum filesize small so we split more often in tests.
+ </description>
+ </property>
+ <property>
+ <name>hadoop.log.dir</name>
+ <value>${user.dir}/../logs</value>
+ </property>
+ <property>
+ <name>hbase.zookeeper.property.clientPort</name>
+ <value>21818</value>
+ <description>Property from ZooKeeper's config zoo.cfg.
+ The port at which the clients will connect.
+ </description>
+ </property>
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-log4j.xml
new file mode 100644
index 0000000..649472d
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-log4j.xml
@@ -0,0 +1,199 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+ <property>
+ <name>content</name>
+ <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollver at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and and 30-day backups
+log4j.appender.RFA.MaxFileSize=256MB
+log4j.appender.RFA.MaxBackupIndex=10
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+ </value>
+ </property>
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 0000000..2b979d7
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,396 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+ <property>
+ <name>dfs.name.dir</name>
+ <!-- cluster variant -->
+ <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+ <description>Determines where on the local filesystem the DFS name node
+ should store the name table. If this is a comma-delimited list
+ of directories then the name table is replicated in all of the
+ directories, for redundancy. </description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.support.append</name>
+ <value>true</value>
+ <description>to enable dfs append</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.webhdfs.enabled</name>
+ <value>false</value>
+ <description>to enable webhdfs</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.datanode.failed.volumes.tolerated</name>
+ <value>0</value>
+ <description>#of failed disks dn would tolerate</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.block.local-path-access.user</name>
+ <value>hbase</value>
+ <description>the user who is allowed to perform short
+ circuit reads.
+ </description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.data.dir</name>
+ <value>/mnt/hmc/hadoop/hdfs/data</value>
+ <description>Determines where on the local filesystem an DFS data node
+ should store its blocks. If this is a comma-delimited
+ list of directories, then data will be stored in all named
+ directories, typically on different devices.
+ Directories that do not exist are ignored.
+ </description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.hosts.exclude</name>
+ <value>/etc/hadoop/conf/dfs.exclude</value>
+ <description>Names a file that contains a list of hosts that are
+ not permitted to connect to the namenode. The full pathname of the
+ file must be specified. If the value is empty, no hosts are
+ excluded.</description>
+ </property>
+
+ <property>
+ <name>dfs.hosts</name>
+ <value>/etc/hadoop/conf/dfs.include</value>
+ <description>Names a file that contains a list of hosts that are
+ permitted to connect to the namenode. The full pathname of the file
+ must be specified. If the value is empty, all hosts are
+ permitted.</description>
+ </property>
+
+ <property>
+ <name>dfs.replication.max</name>
+ <value>50</value>
+ <description>Maximal block replication.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.replication</name>
+ <value>3</value>
+ <description>Default block replication.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.heartbeat.interval</name>
+ <value>3</value>
+ <description>Determines datanode heartbeat interval in seconds.</description>
+ </property>
+
+ <property>
+ <name>dfs.safemode.threshold.pct</name>
+ <value>1.0f</value>
+ <description>
+ Specifies the percentage of blocks that should satisfy
+ the minimal replication requirement defined by dfs.replication.min.
+ Values less than or equal to 0 mean not to start in safe mode.
+ Values greater than 1 will make safe mode permanent.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.balance.bandwidthPerSec</name>
+ <value>6250000</value>
+ <description>
+ Specifies the maximum amount of bandwidth that each datanode
+ can utilize for the balancing purpose in term of
+ the number of bytes per second.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.address</name>
+ <value>0.0.0.0:50010</value>
+ </property>
+
+ <property>
+ <name>dfs.datanode.http.address</name>
+ <value>0.0.0.0:50075</value>
+ </property>
+
+ <property>
+ <name>dfs.block.size</name>
+ <value>134217728</value>
+ <description>The default block size for new files.</description>
+ </property>
+
+ <property>
+ <name>dfs.http.address</name>
+ <value>hdp1.cybervisiontech.com.ua:50070</value>
+<description>The name of the default file system. Either the
+literal string "local" or a host:port for HDFS.</description>
+<final>true</final>
+</property>
+
+<property>
+<name>dfs.datanode.du.reserved</name>
+<!-- cluster variant -->
+<value>1073741824</value>
+<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+</description>
+</property>
+
+<property>
+<name>dfs.datanode.ipc.address</name>
+<value>0.0.0.0:8010</value>
+<description>
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+</description>
+</property>
+
+<property>
+<name>dfs.blockreport.initialDelay</name>
+<value>120</value>
+<description>Delay for first block report in seconds.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>40</value>
+<description>The number of server threads for the namenode.</description>
+</property>
+
+<property>
+<name>dfs.datanode.max.xcievers</name>
+<value>1024</value>
+<description>PRIVATE CONFIG VARIABLE</description>
+</property>
+
+<!-- Permissions configuration -->
+
+<property>
+<name>dfs.umaskmode</name>
+<value>077</value>
+<description>
+The octal umask used when creating files and directories.
+</description>
+</property>
+
+<property>
+<name>dfs.web.ugi</name>
+<!-- cluster variant -->
+<value>gopher,gopher</value>
+<description>The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+</description>
+</property>
+
+<property>
+<name>dfs.permissions</name>
+<value>true</value>
+<description>
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+</description>
+</property>
+
+<property>
+<name>dfs.permissions.supergroup</name>
+<value>hdfs</value>
+<description>The name of the group of super-users.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>100</value>
+<description>Added to grow Queue size so that more client connections are allowed</description>
+</property>
+
+<property>
+<name>ipc.server.max.response.size</name>
+<value>5242880</value>
+</property>
+<property>
+<name>dfs.block.access.token.enable</name>
+<value>true</value>
+<description>
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+<description>
+Kerberos principal name for the NameNode
+</description>
+</property>
+
+<property>
+<name>dfs.secondary.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+ <description>
+ Kerberos principal name for the secondary NameNode.
+ </description>
+ </property>
+
+
+<!--
+ This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+ <property>
+ <name>dfs.namenode.kerberos.https.principal</name>
+ <value>host/_HOST@</value>
+ <description>The Kerberos principal for the host that the NameNode runs on.</description>
+
+ </property>
+
+ <property>
+ <name>dfs.secondary.namenode.kerberos.https.principal</name>
+ <value>host/_HOST@</value>
+ <description>The Kerberos principal for the hostthat the secondary NameNode runs on.</description>
+
+ </property>
+
+ <property>
+ <!-- cluster variant -->
+ <name>dfs.secondary.http.address</name>
+ <value>hdp2.cybervisiontech.com.ua:50090</value>
+ <description>Address of secondary namenode web server</description>
+ </property>
+
+ <property>
+ <name>dfs.secondary.https.port</name>
+ <value>50490</value>
+ <description>The https port where secondary-namenode binds</description>
+ </property>
+
+ <property>
+ <name>dfs.web.authentication.kerberos.principal</name>
+ <value>HTTP/_HOST@</value>
+ <description>
+ The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+ The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+ HTTP SPENGO specification.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.web.authentication.kerberos.keytab</name>
+ <value>/nn.service.keytab</value>
+ <description>
+ The Kerberos keytab file with the credentials for the
+ HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.kerberos.principal</name>
+ <value>dn/_HOST@</value>
+ <description>
+ The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.keytab.file</name>
+ <value>/nn.service.keytab</value>
+ <description>
+ Combined keytab file containing the namenode service and host principals.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.secondary.namenode.keytab.file</name>
+ <value>/nn.service.keytab</value>
+ <description>
+ Combined keytab file containing the namenode service and host principals.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.keytab.file</name>
+ <value>/dn.service.keytab</value>
+ <description>
+ The filename of the keytab file for the DataNode.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.https.port</name>
+ <value>50470</value>
+ <description>The https port where namenode binds</description>
+
+ </property>
+
+ <property>
+ <name>dfs.https.address</name>
+ <value>hdp1.cybervisiontech.com.ua:50470</value>
+ <description>The https address where namenode binds</description>
+
+ </property>
+
+ <property>
+ <name>dfs.datanode.data.dir.perm</name>
+ <value>750</value>
+<description>The permissions that should be there on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.</description>
+ </property>
+
+ <property>
+ <name>dfs.access.time.precision</name>
+ <value>0</value>
+ <description>The access time for HDFS file is precise upto this value.
+ The default value is 1 hour. Setting a value of 0 disables
+ access times for HDFS.
+ </description>
+</property>
+
+<property>
+ <name>dfs.cluster.administrators</name>
+ <value> hdfs</value>
+ <description>ACL for who all can view the default servlets in the HDFS</description>
+</property>
+
+<property>
+ <name>ipc.server.read.threadpool.size</name>
+ <value>5</value>
+ <description></description>
+</property>
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..da61660
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/metainfo.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>HDFS</name>
+ <extends>common-services/HDFS/1.0</extends>
+ <configuration-dependencies>
+ <config-type>core-site</config-type>
+ <config-type>global</config-type>
+ <config-type>hdfs-site</config-type>
+ <config-type>hadoop-policy</config-type>
+ <config-type>hdfs-log4j</config-type>
+ </configuration-dependencies>
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/package/dummy-script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/package/dummy-script.py b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/package/dummy-script.py
new file mode 100644
index 0000000..35de4bb
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/package/dummy-script.py
@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HIVE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HIVE/metainfo.xml
new file mode 100644
index 0000000..9c122b2
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HIVE/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>HIVE</name>
+ <extends>common-services/HIVE/1.0</extends>
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/MAPREDUCE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/MAPREDUCE/metainfo.xml
new file mode 100644
index 0000000..3b0b3d9
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/MAPREDUCE/metainfo.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>MAPREDUCE</name>
+ <extends>common-services/MAPREDUCE/1.0</extends>
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/ZOOKEEPER/metainfo.xml
new file mode 100644
index 0000000..9c8a299
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>ZOOKEEPER</name>
+ <extends>common-services/ZOOKEEPER/1.0</extends>
+ </service>
+ </services>
+</metainfo>
[2/2] ambari git commit: AMBARI-20891 - Allow extensions to auto-link with supported stack versions
Posted by tt...@apache.org.
AMBARI-20891 - Allow extensions to auto-link with supported stack versions
Conflicts:
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java
ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7c56924a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7c56924a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7c56924a
Branch: refs/heads/branch-2.6
Commit: 7c56924a952358127dd80eef7e8c9dfeac0aa8b0
Parents: 0e2e711
Author: Tim Thorpe <tt...@apache.org>
Authored: Tue May 9 07:53:39 2017 -0700
Committer: Tim Thorpe <tt...@apache.org>
Committed: Fri Nov 24 12:00:57 2017 -0800
----------------------------------------------------------------------
.../AmbariManagementControllerImpl.java | 68 +---
.../controller/AmbariManagementHelper.java | 175 ++++++++
.../ambari/server/orm/dao/ExtensionLinkDAO.java | 36 +-
.../orm/entities/ExtensionLinkEntity.java | 1 +
.../ambari/server/stack/ExtensionModule.java | 2 +
.../ambari/server/stack/StackManager.java | 81 +++-
.../apache/ambari/server/stack/StackModule.java | 4 +-
.../ambari/server/state/ExtensionInfo.java | 26 +-
.../apache/ambari/server/state/StackInfo.java | 27 +-
.../state/stack/ExtensionMetainfoXml.java | 11 +
.../stack/StackManagerCommonServicesTest.java | 4 +-
.../server/stack/StackManagerExtensionTest.java | 79 ++--
.../server/stack/StackManagerMiscTest.java | 13 +-
.../ambari/server/stack/StackManagerMock.java | 5 +-
.../ambari/server/stack/StackManagerTest.java | 13 +-
.../resources/extensions/EXT/0.1/metainfo.xml | 2 +-
.../resources/extensions/EXT/0.2/metainfo.xml | 3 +-
.../resources/extensions/EXT/0.3/metainfo.xml | 32 ++
.../EXT/0.3/services/OOZIE2/metainfo.xml | 118 ++++++
.../services/OOZIE2/themes/broken_theme.json | 3 +
.../stacks_with_extensions/HDP/0.3/metainfo.xml | 22 ++
.../HDP/0.3/repos/repoinfo.xml | 63 +++
.../HDP/0.3/services/HBASE/metainfo.xml | 26 ++
.../0.3/services/HDFS/configuration/global.xml | 145 +++++++
.../services/HDFS/configuration/hadoop-env.xml | 223 +++++++++++
.../services/HDFS/configuration/hbase-site.xml | 137 +++++++
.../services/HDFS/configuration/hdfs-log4j.xml | 199 ++++++++++
.../services/HDFS/configuration/hdfs-site.xml | 396 +++++++++++++++++++
.../HDP/0.3/services/HDFS/metainfo.xml | 30 ++
.../0.3/services/HDFS/package/dummy-script.py | 20 +
.../HDP/0.3/services/HIVE/metainfo.xml | 26 ++
.../HDP/0.3/services/MAPREDUCE/metainfo.xml | 23 ++
.../HDP/0.3/services/ZOOKEEPER/metainfo.xml | 26 ++
33 files changed, 1921 insertions(+), 118 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index a6315f8..585ee46 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -122,14 +122,12 @@ import org.apache.ambari.server.orm.dao.StackDAO;
import org.apache.ambari.server.orm.dao.WidgetDAO;
import org.apache.ambari.server.orm.dao.WidgetLayoutDAO;
import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.orm.entities.ExtensionEntity;
import org.apache.ambari.server.orm.entities.ExtensionLinkEntity;
import org.apache.ambari.server.orm.entities.HostEntity;
import org.apache.ambari.server.orm.entities.OperatingSystemEntity;
import org.apache.ambari.server.orm.entities.RepositoryEntity;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
import org.apache.ambari.server.orm.entities.SettingEntity;
-import org.apache.ambari.server.orm.entities.StackEntity;
import org.apache.ambari.server.orm.entities.WidgetEntity;
import org.apache.ambari.server.orm.entities.WidgetLayoutEntity;
import org.apache.ambari.server.orm.entities.WidgetLayoutUserWidgetEntity;
@@ -313,11 +311,13 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
private MaintenanceStateHelper maintenanceStateHelper;
- @Inject
- private ExtensionLinkDAO linkDAO;
+ private AmbariManagementHelper helper;
+
@Inject
private ExtensionDAO extensionDAO;
@Inject
+ private ExtensionLinkDAO linkDAO;
+ @Inject
private StackDAO stackDAO;
@Inject
protected OsFamily osFamily;
@@ -401,6 +401,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
mysqljdbcUrl = null;
serverDB = null;
}
+ helper = new AmbariManagementHelper(stackDAO, extensionDAO, linkDAO);
}
@Override
@@ -1825,7 +1826,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
cluster.setCurrentStackVersion(desiredVersion);
}
// Stack Upgrade: unlike the workflow for creating a cluster, updating a cluster via the API will not
- // create any ClusterVersionEntity changes because those have to go through the Stack Upgrade process.
boolean requiresHostListUpdate =
request.getHostNames() != null && !request.getHostNames().isEmpty();
@@ -5632,7 +5632,13 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
*/
@Override
public void createExtensionLink(ExtensionLinkRequest request) throws AmbariException {
- validateCreateExtensionLinkRequest(request);
+ if (StringUtils.isBlank(request.getStackName())
+ || StringUtils.isBlank(request.getStackVersion())
+ || StringUtils.isBlank(request.getExtensionName())
+ || StringUtils.isBlank(request.getExtensionVersion())) {
+
+ throw new IllegalArgumentException("Stack name, stack version, extension name and extension version should be provided");
+ }
StackInfo stackInfo = ambariMetaInfo.getStack(request.getStackName(), request.getStackVersion());
@@ -5646,24 +5652,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
throw new StackAccessException("extensionName=" + request.getExtensionName() + ", extensionVersion=" + request.getExtensionVersion());
}
- ExtensionHelper.validateCreateLink(stackInfo, extensionInfo);
- ExtensionLinkEntity linkEntity = createExtensionLinkEntity(request);
- ambariMetaInfo.getStackManager().linkStackToExtension(stackInfo, extensionInfo);
-
- try {
- linkDAO.create(linkEntity);
- linkEntity = linkDAO.merge(linkEntity);
- } catch (RollbackException e) {
- String message = "Unable to create extension link";
- LOG.debug(message, e);
- String errorMessage = message
- + ", stackName=" + request.getStackName()
- + ", stackVersion=" + request.getStackVersion()
- + ", extensionName=" + request.getExtensionName()
- + ", extensionVersion=" + request.getExtensionVersion();
- LOG.warn(errorMessage);
- throw new AmbariException(errorMessage, e);
- }
+ helper.createExtensionLink(ambariMetaInfo.getStackManager(), stackInfo, extensionInfo);
}
/**
@@ -5714,37 +5703,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
ambariMetaInfo.getStackManager().linkStackToExtension(stackInfo, extensionInfo);
}
- private void validateCreateExtensionLinkRequest(ExtensionLinkRequest request) throws AmbariException {
- if (request.getStackName() == null
- || request.getStackVersion() == null
- || request.getExtensionName() == null
- || request.getExtensionVersion() == null) {
-
- throw new IllegalArgumentException("Stack name, stack version, extension name and extension version should be provided");
- }
-
- ExtensionLinkEntity entity = linkDAO.findByStackAndExtension(request.getStackName(), request.getStackVersion(),
- request.getExtensionName(), request.getExtensionVersion());
-
- if (entity != null) {
- throw new AmbariException("The stack and extension are already linked"
- + ", stackName=" + request.getStackName()
- + ", stackVersion=" + request.getStackVersion()
- + ", extensionName=" + request.getExtensionName()
- + ", extensionVersion=" + request.getExtensionVersion());
- }
- }
-
- private ExtensionLinkEntity createExtensionLinkEntity(ExtensionLinkRequest request) throws AmbariException {
- StackEntity stack = stackDAO.find(request.getStackName(), request.getStackVersion());
- ExtensionEntity extension = extensionDAO.find(request.getExtensionName(), request.getExtensionVersion());
-
- ExtensionLinkEntity linkEntity = new ExtensionLinkEntity();
- linkEntity.setStack(stack);
- linkEntity.setExtension(extension);
- return linkEntity;
- }
-
@Override
public QuickLinkVisibilityController getQuicklinkVisibilityController() {
SettingEntity entity = settingDAO.findByName(QuickLinksProfile.SETTING_NAME_QUICKLINKS_PROFILE);
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
new file mode 100644
index 0000000..2dd6f12
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import javax.persistence.RollbackException;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.orm.dao.ExtensionDAO;
+import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
+import org.apache.ambari.server.orm.dao.StackDAO;
+import org.apache.ambari.server.orm.entities.ExtensionEntity;
+import org.apache.ambari.server.orm.entities.ExtensionLinkEntity;
+import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.stack.ExtensionHelper;
+import org.apache.ambari.server.stack.StackManager;
+import org.apache.ambari.server.state.ExtensionInfo;
+import org.apache.ambari.server.state.StackInfo;
+import org.apache.ambari.server.state.stack.ExtensionMetainfoXml;
+import org.apache.ambari.server.utils.VersionUtils;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+
+@Singleton
+public class AmbariManagementHelper {
+
+ private final static Logger LOG =
+ LoggerFactory.getLogger(AmbariManagementHelper.class);
+
+ private ExtensionLinkDAO linkDAO;
+ private ExtensionDAO extensionDAO;
+ private StackDAO stackDAO;
+
+ @Inject
+ public AmbariManagementHelper(StackDAO stackDAO, ExtensionDAO extensionDAO, ExtensionLinkDAO linkDAO) {
+ this.stackDAO = stackDAO;
+ this.extensionDAO = extensionDAO;
+ this.linkDAO = linkDAO;
+ }
+
+ /**
+ * This method will create a link between an extension version and a stack version (Extension Link).
+ *
+ * An extension version is like a stack version but it contains custom services. Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+ public void createExtensionLink(StackManager stackManager, StackInfo stackInfo, ExtensionInfo extensionInfo) throws AmbariException {
+ validateCreateExtensionLinkRequest(stackInfo, extensionInfo);
+ ExtensionHelper.validateCreateLink(stackInfo, extensionInfo);
+ ExtensionLinkEntity linkEntity = createExtensionLinkEntity(stackInfo, extensionInfo);
+ stackManager.linkStackToExtension(stackInfo, extensionInfo);
+
+ try {
+ linkDAO.create(linkEntity);
+ linkEntity = linkDAO.merge(linkEntity);
+ } catch (RollbackException e) {
+ String message = "Unable to create extension link";
+ LOG.debug(message, e);
+ String errorMessage = message
+ + ", stackName=" + stackInfo.getName()
+ + ", stackVersion=" + stackInfo.getVersion()
+ + ", extensionName=" + extensionInfo.getName()
+ + ", extensionVersion=" + extensionInfo.getVersion();
+ LOG.warn(errorMessage);
+ throw new AmbariException(errorMessage, e);
+ }
+ }
+
+ /**
+ * This method will create a link between an extension version and a stack version (Extension Link).
+ *
+ * An extension version is like a stack version but it contains custom services. Linking an extension
+ * version to the current stack version allows the cluster to install the custom services contained in
+ * the extension version.
+ */
+ public void createExtensionLinks(StackManager stackManager, List<ExtensionInfo> extensions) throws AmbariException {
+ Map<String, List<StackInfo>> stackMap = stackManager.getStacksByName();
+ for (List<StackInfo> stacks : stackMap.values()) {
+ Collections.sort(stacks);
+ Collections.reverse(stacks);
+ }
+
+ Collections.sort(extensions);
+ Collections.reverse(extensions);
+ for (ExtensionInfo extension : extensions) {
+ if (extension.isActive() && extension.isAutoLink()) {
+ LOG.debug("Autolink - looking for matching stack versions for extension:{}/{} ", extension.getName(), extension.getVersion());
+ for (ExtensionMetainfoXml.Stack supportedStack : extension.getStacks()) {
+ List<StackInfo> stacks = stackMap.get(supportedStack.getName());
+ for (StackInfo stack : stacks) {
+ // If the stack version is not currently linked to a version of the extension and it meets the minimum stack version then link them
+ if (stack.getExtension(extension.getName()) == null && VersionUtils.compareVersions(stack.getVersion(), supportedStack.getVersion()) > -1) {
+ LOG.debug("Autolink - extension: {}/{} stack: {}/{}", extension.getName(), extension.getVersion(),
+ stack.getName(), stack.getVersion());
+ createExtensionLink(stackManager, stack, extension);
+ }
+ else {
+ LOG.debug("Autolink - not a match extension: {}/{} stack: {}/{}", extension.getName(), extension.getVersion(),
+ stack.getName(), stack.getVersion());
+ }
+ }
+ }
+ }
+ else {
+ LOG.debug("Autolink - skipping extension: {}/{}. It is either not active or set to autolink.", extension.getName(), extension.getVersion());
+ }
+ }
+ }
+
+ /**
+ * Validates the stackInfo and extensionInfo parameters are valid.
+ * If they are then it confirms that the stack and extension are not already linked.
+ */
+ private void validateCreateExtensionLinkRequest(StackInfo stackInfo, ExtensionInfo extensionInfo) throws AmbariException {
+ if (stackInfo == null) {
+ throw new IllegalArgumentException("Stack should be provided");
+ }
+ if (extensionInfo == null) {
+ throw new IllegalArgumentException("Extension should be provided");
+ }
+ if (StringUtils.isBlank(stackInfo.getName())
+ || StringUtils.isBlank(stackInfo.getVersion())
+ || StringUtils.isBlank(extensionInfo.getName())
+ || StringUtils.isBlank(extensionInfo.getVersion())) {
+
+ throw new IllegalArgumentException("Stack name, stack version, extension name and extension version should be provided");
+ }
+
+ ExtensionLinkEntity entity = linkDAO.findByStackAndExtension(stackInfo.getName(), stackInfo.getVersion(),
+ extensionInfo.getName(), extensionInfo.getVersion());
+
+ if (entity != null) {
+ throw new AmbariException("The stack and extension are already linked"
+ + ", stackName=" + stackInfo.getName()
+ + ", stackVersion=" + stackInfo.getVersion()
+ + ", extensionName=" + extensionInfo.getName()
+ + ", extensionVersion=" + extensionInfo.getVersion());
+ }
+ }
+
+ private ExtensionLinkEntity createExtensionLinkEntity(StackInfo stackInfo, ExtensionInfo extensionInfo) throws AmbariException {
+ StackEntity stack = stackDAO.find(stackInfo.getName(), stackInfo.getVersion());
+ ExtensionEntity extension = extensionDAO.find(extensionInfo.getName(), extensionInfo.getVersion());
+
+ ExtensionLinkEntity linkEntity = new ExtensionLinkEntity();
+ linkEntity.setStack(stack);
+ linkEntity.setExtension(extension);
+ return linkEntity;
+ }
+
+}
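The createExtensionLinks() method above walks extensions and stacks newest-first and links each active, autolink-enabled extension to every supported stack version that is not already linked to some version of that extension, comparing against the extension's minimum supported stack version via VersionUtils.compareVersions. A minimal stand-alone sketch of that selection rule follows; the version strings are hypothetical and the compareVersions helper is a simplified stand-in, not the commit's own code:

    // Illustration of the autolink selection rule used by createExtensionLinks():
    // an extension that supports HDP >= 0.2 is linked to every HDP stack version
    // that meets the minimum and is not already linked to some version of it.
    public class AutolinkRuleSketch {
      public static void main(String[] args) {
        String minSupported = "0.2";                  // minimum stack version the extension declares
        String[] hdpVersions = {"0.3", "0.2", "0.1"}; // newest first, as createExtensionLinks sorts them
        for (String stackVersion : hdpVersions) {
          boolean alreadyLinked = false;              // real code checks stack.getExtension(name) == null
          boolean meetsMinimum = compareVersions(stackVersion, minSupported) > -1;
          System.out.println("HDP " + stackVersion
              + ((meetsMinimum && !alreadyLinked) ? " -> link" : " -> skip"));
        }
      }

      // Simplified stand-in for org.apache.ambari.server.utils.VersionUtils.compareVersions
      private static int compareVersions(String a, String b) {
        String[] x = a.split("\\.");
        String[] y = b.split("\\.");
        for (int i = 0; i < Math.max(x.length, y.length); i++) {
          int xi = i < x.length ? Integer.parseInt(x[i]) : 0;
          int yi = i < y.length ? Integer.parseInt(y[i]) : 0;
          if (xi != yi) {
            return Integer.compare(xi, yi);
          }
        }
        return 0;
      }
    }

Run as-is, the sketch links HDP 0.3 and 0.2 but skips 0.1, mirroring how an extension is attached to every stack version that satisfies its minimum supported version.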
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java
index d90480b..85c5722 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java
@@ -74,16 +74,19 @@ public class ExtensionLinkDAO {
}
String stackName = request.getStackName();
- String stackVersion = request.getStackName();
- String extensionName = request.getStackName();
- String extensionVersion = request.getStackName();
+ String stackVersion = request.getStackVersion();
+ String extensionName = request.getExtensionName();
+ String extensionVersion = request.getExtensionVersion();
if (stackName != null && stackVersion != null) {
- if (extensionName != null && extensionVersion != null) {
- ExtensionLinkEntity entity = findByStackAndExtension(stackName, stackVersion, extensionName, extensionVersion);
- List<ExtensionLinkEntity> list = new ArrayList<ExtensionLinkEntity>();
- list.add(entity);
- return list;
+ if (extensionName != null) {
+ if (extensionVersion != null) {
+ ExtensionLinkEntity entity = findByStackAndExtension(stackName, stackVersion, extensionName, extensionVersion);
+ List<ExtensionLinkEntity> list = new ArrayList<>();
+ list.add(entity);
+ return list;
+ }
+ return findByStackAndExtensionName(stackName, stackVersion, extensionName);
}
return findByStack(stackName, stackVersion);
}
@@ -153,6 +156,23 @@ public class ExtensionLinkDAO {
}
/**
+ * Gets the extension link that match the specified stack name, stack version and extension name.
+ *
+ * @return the extension link matching the specified stack name, stack version and extension name if any.
+ */
+ @RequiresSession
+ public List<ExtensionLinkEntity> findByStackAndExtensionName(String stackName, String stackVersion, String extensionName) {
+ TypedQuery<ExtensionLinkEntity> query = entityManagerProvider.get().createNamedQuery(
+ "ExtensionLinkEntity.findByStackAndExtensionName", ExtensionLinkEntity.class);
+
+ query.setParameter("stackName", stackName);
+ query.setParameter("stackVersion", stackVersion);
+ query.setParameter("extensionName", extensionName);
+
+ return daoUtils.selectList(query);
+ }
+
+ /**
* Gets the extension link that match the specified stack name, stack version, extension name and extension version.
*
* @return the extension link matching the specified stack name, stack version, extension name and extension version if any.
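A hedged usage sketch of the new findByStackAndExtensionName() lookup added above; the surrounding class and the example arguments are hypothetical and only illustrate the call, they are not part of this commit:

    import java.util.List;

    import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
    import org.apache.ambari.server.orm.entities.ExtensionLinkEntity;

    // Hypothetical caller, assuming an ExtensionLinkDAO instance is supplied (e.g. via injection).
    public class ExtensionLinkLookupExample {
      private final ExtensionLinkDAO linkDAO;

      public ExtensionLinkLookupExample(ExtensionLinkDAO linkDAO) {
        this.linkDAO = linkDAO;
      }

      // Returns every link between the given stack version and any version of the named extension,
      // e.g. linksFor("HDP", "2.6", "EXT").
      public List<ExtensionLinkEntity> linksFor(String stackName, String stackVersion, String extensionName) {
        return linkDAO.findByStackAndExtensionName(stackName, stackVersion, extensionName);
      }
    }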
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExtensionLinkEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExtensionLinkEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExtensionLinkEntity.java
index 12b3ce0..e2b48bf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExtensionLinkEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExtensionLinkEntity.java
@@ -42,6 +42,7 @@ import javax.persistence.UniqueConstraint;
@TableGenerator(name = "link_id_generator", table = "ambari_sequences", pkColumnName = "sequence_name", valueColumnName = "sequence_value", pkColumnValue = "link_id_seq", initialValue = 0)
@NamedQueries({
@NamedQuery(name = "ExtensionLinkEntity.findAll", query = "SELECT link FROM ExtensionLinkEntity link"),
+ @NamedQuery(name = "ExtensionLinkEntity.findByStackAndExtensionName", query = "SELECT link FROM ExtensionLinkEntity link WHERE link.stack.stackName = :stackName AND link.stack.stackVersion = :stackVersion AND link.extension.extensionName = :extensionName"),
@NamedQuery(name = "ExtensionLinkEntity.findByStackAndExtension", query = "SELECT link FROM ExtensionLinkEntity link WHERE link.stack.stackName = :stackName AND link.stack.stackVersion = :stackVersion AND link.extension.extensionName = :extensionName AND link.extension.extensionVersion = :extensionVersion"),
@NamedQuery(name = "ExtensionLinkEntity.findByStack", query = "SELECT link FROM ExtensionLinkEntity link WHERE link.stack.stackName = :stackName AND link.stack.stackVersion = :stackVersion"),
@NamedQuery(name = "ExtensionLinkEntity.findByExtension", query = "SELECT link FROM ExtensionLinkEntity link WHERE link.extension.extensionName = :extensionName AND link.extension.extensionVersion = :extensionVersion") })
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionModule.java
index d425f9a..5c3c60e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionModule.java
@@ -399,6 +399,8 @@ public class ExtensionModule extends BaseModule<ExtensionModule, ExtensionInfo>
extensionInfo.setParentExtensionVersion(emx.getExtends());
extensionInfo.setStacks(emx.getStacks());
extensionInfo.setExtensions(emx.getExtensions());
+ extensionInfo.setActive(emx.getVersion().isActive());
+ extensionInfo.setAutoLink(emx.isAutoLink());
}
try {
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
index 749a95e..ff0a016 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
@@ -20,11 +20,13 @@ package org.apache.ambari.server.stack;
import java.io.File;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import javax.annotation.Nullable;
import javax.xml.XMLConstants;
@@ -35,6 +37,7 @@ import javax.xml.validation.Validator;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariManagementHelper;
import org.apache.ambari.server.metadata.ActionMetadata;
import org.apache.ambari.server.orm.dao.ExtensionDAO;
import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
@@ -57,7 +60,6 @@ import org.xml.sax.SAXParseException;
import com.google.inject.assistedinject.Assisted;
import com.google.inject.assistedinject.AssistedInject;
-
/**
* Manages all stack related behavior including parsing of stacks and providing access to
* stack information.
@@ -110,6 +112,8 @@ public class StackManager {
*/
private Map<String, ExtensionInfo> extensionMap = new HashMap<String, ExtensionInfo>();
+ private AmbariManagementHelper helper;
+
/**
* Constructor. Initialize stack manager.
*
@@ -131,6 +135,8 @@ public class StackManager {
* extension DAO automatically injected
* @param linkDao
* extension link DAO automatically injected
+ * @param helper
+ * Ambari management helper automatically injected
*
* @throws AmbariException
* if an exception occurs while processing the stacks
@@ -141,7 +147,7 @@ public class StackManager {
@Assisted("extensionRoot") @Nullable File extensionRoot,
@Assisted OsFamily osFamily, @Assisted boolean validate,
MetainfoDAO metaInfoDAO, ActionMetadata actionMetadata, StackDAO stackDao,
- ExtensionDAO extensionDao, ExtensionLinkDAO linkDao)
+ ExtensionDAO extensionDao, ExtensionLinkDAO linkDao, AmbariManagementHelper helper)
throws AmbariException {
LOG.info("Initializing the stack manager...");
@@ -154,7 +160,8 @@ public class StackManager {
stackMap = new HashMap<String, StackInfo>();
stackContext = new StackContext(metaInfoDAO, actionMetadata, osFamily);
- extensionMap = new HashMap<String, ExtensionInfo>();
+ extensionMap = new HashMap<>();
+ this.helper = helper;
parseDirectories(stackRoot, commonServicesRoot, extensionRoot);
@@ -189,6 +196,7 @@ public class StackManager {
LOG.info("About to parse extension directories");
extensionModules = parseExtensionDirectory(extensionRoot);
}
+
private void populateDB(StackDAO stackDao, ExtensionDAO extensionDao) throws AmbariException {
// for every stack read in, ensure that we have a database entry for it;
// don't put try/catch logic around this since a failure here will
@@ -227,6 +235,51 @@ public class StackManager {
extensionDao.create(extensionEntity);
}
}
+
+ createLinks();
+ }
+
+ /**
+ * Attempts to automatically create links between extension versions and stack versions.
+ * This is limited to 'active' extensions that have the 'auto-link' attribute set in their metainfo.xml.
+ * Stack versions are selected based on the minimum stack versions that the extension supports.
+ * The extension and stack versions are processed in order from most recent to oldest,
+ * so the newest extension version is auto-linked before older extension versions.
+ * If a different version of the same extension is already linked to a stack version, then that stack version
+ * is skipped.
+ */
+ private void createLinks() {
+ LOG.info("Creating links");
+ Collection<ExtensionInfo> extensions = getExtensions();
+ Set<String> names = new HashSet<String>();
+ for(ExtensionInfo extension : extensions){
+ names.add(extension.getName());
+ }
+ for(String name : names) {
+ createLinksForExtension(name);
+ }
+ }
+
+ /**
+ * Attempts to automatically create links between versions of a particular extension and stack versions they support.
+ * This is limited to 'active' extensions that have the 'auto-link' attribute set in their metainfo.xml.
+ * Stack versions are selected based on the minimum stack versions that the extension supports.
+ * The extension and stack versions are processed in order from most recent to oldest,
+ * so the newest extension version is auto-linked before older extension versions.
+ * If a different version of the same extension is already linked to a stack version, then that stack version
+ * is skipped.
+ */
+ private void createLinksForExtension(String name) {
+ Collection<ExtensionInfo> collection = getExtensions(name);
+ List<ExtensionInfo> extensions = new ArrayList<ExtensionInfo>(collection.size());
+ extensions.addAll(collection);
+ try {
+ helper.createExtensionLinks(this, extensions);
+ }
+ catch (AmbariException e) {
+ String msg = String.format("Failed to create link for extension: %s with exception: %s", name, e.getMessage());
+ LOG.error(msg);
+ }
}
/**
@@ -259,6 +312,24 @@ public class StackManager {
}
/**
+ * Obtain a map of all stacks, keyed by stack name.
+ *
+ * @return A map of all stacks with the name as the key.
+ */
+ public Map<String, List<StackInfo>> getStacksByName() {
+ Map<String, List<StackInfo>> stacks = new HashMap<String, List<StackInfo>>();
+ for (StackInfo stack: stackMap.values()) {
+ List<StackInfo> list = stacks.get(stack.getName());
+ if (list == null) {
+ list = new ArrayList<StackInfo>();
+ stacks.put(stack.getName(), list);
+ }
+ list.add(stack);
+ }
+ return stacks;
+ }
+
+ /**
* Obtain all stacks.
*
* @return collection of all stacks
@@ -470,8 +541,6 @@ public class StackManager {
}
}
-
-
/**
* Validate that the specified extension root is a valid directory.
*
@@ -578,9 +647,11 @@ public class StackManager {
}
public void linkStackToExtension(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+ stack.addExtension(extension);
}
public void unlinkStackAndExtension(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+ stack.removeExtension(extension);
}
/**
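
As a usage sketch (not part of the patch), the new getStacksByName() pairs naturally with the version-aware compareTo on StackInfo to pick the most recent version of each stack; the getVersion() accessor on StackInfo is assumed here.

    import java.util.Collections;
    import java.util.List;
    import java.util.Map;

    import org.apache.ambari.server.stack.StackManager;
    import org.apache.ambari.server.state.StackInfo;

    public class LatestStackSketch {
      // Illustrative only: for each stack name, pick the highest version using
      // the VersionUtils-based ordering introduced by this patch.
      public static void printLatestStacks(StackManager stackManager) {
        Map<String, List<StackInfo>> byName = stackManager.getStacksByName();
        for (Map.Entry<String, List<StackInfo>> entry : byName.entrySet()) {
          StackInfo latest = Collections.max(entry.getValue());
          System.out.println(entry.getKey() + " -> " + latest.getVersion());
        }
      }
    }
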
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index 0313770..34a3047 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -404,9 +404,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
private void addExtensionServices() throws AmbariException {
for (ExtensionModule extension : extensionModules.values()) {
- stackInfo.getExtensions().add(extension.getModuleInfo());
- Collection<ServiceModule> services = extension.getServiceModules().values();
- addServices(services);
+ stackInfo.addExtension(extension.getModuleInfo());
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/main/java/org/apache/ambari/server/state/ExtensionInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ExtensionInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ExtensionInfo.java
index 89a6fb5..c05a466 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ExtensionInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ExtensionInfo.java
@@ -30,6 +30,7 @@ import java.util.Set;
import org.apache.ambari.server.controller.ExtensionVersionResponse;
import org.apache.ambari.server.stack.Validable;
import org.apache.ambari.server.state.stack.ExtensionMetainfoXml;
+import org.apache.ambari.server.utils.VersionUtils;
/**
* An extension version is like a stack version but it contains custom services. Linking an extension
@@ -45,6 +46,8 @@ public class ExtensionInfo implements Comparable<ExtensionInfo>, Validable{
private List<ExtensionMetainfoXml.Stack> stacks;
private List<ExtensionMetainfoXml.Extension> extensions;
private boolean valid = true;
+ private boolean autoLink = false;
+ private boolean active = false;
/**
*
@@ -185,9 +188,10 @@ public class ExtensionInfo implements Comparable<ExtensionInfo>, Validable{
@Override
public int compareTo(ExtensionInfo o) {
- String myId = name + "-" + version;
- String oId = o.name + "-" + o.version;
- return myId.compareTo(oId);
+ if (name.equals(o.name)) {
+ return VersionUtils.compareVersions(version, o.version);
+ }
+ return name.compareTo(o.name);
}
public List<ExtensionMetainfoXml.Stack> getStacks() {
@@ -205,4 +209,20 @@ public class ExtensionInfo implements Comparable<ExtensionInfo>, Validable{
public void setExtensions(List<ExtensionMetainfoXml.Extension> extensions) {
this.extensions = extensions;
}
+
+ public boolean isAutoLink() {
+ return autoLink;
+ }
+
+ public void setAutoLink(boolean autoLink) {
+ this.autoLink = autoLink;
+ }
+
+ public boolean isActive() {
+ return active;
+ }
+
+ public void setActive(boolean active) {
+ this.active = active;
+ }
}
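
A small sketch of the effect of the revised compareTo: versions of the same extension now order numerically through VersionUtils rather than lexically, so callers can walk extensions from newest to oldest as described in the StackManager javadoc above. The helper below is illustrative only.

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    import org.apache.ambari.server.state.ExtensionInfo;

    public class ExtensionOrderingSketch {
      // Illustrative only: sort extensions, then reverse so that within each
      // extension name the newest version (per VersionUtils) comes first.
      public static List<ExtensionInfo> newestFirst(List<ExtensionInfo> extensions) {
        List<ExtensionInfo> sorted = new ArrayList<>(extensions);
        Collections.sort(sorted);
        Collections.reverse(sorted);
        return sorted;
      }
    }
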
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
index b7f64f9..1658f1b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
@@ -36,6 +36,7 @@ import org.apache.ambari.server.state.stack.ConfigUpgradePack;
import org.apache.ambari.server.state.stack.RepositoryXml;
import org.apache.ambari.server.state.stack.StackRoleCommandOrder;
import org.apache.ambari.server.state.stack.UpgradePack;
+import org.apache.ambari.server.utils.VersionUtils;
import com.google.common.collect.ListMultimap;
import com.google.common.collect.Multimaps;
@@ -200,6 +201,7 @@ public class StackInfo implements Comparable<StackInfo>, Validable{
public ExtensionInfo getExtensionByService(String serviceName) {
Collection<ExtensionInfo> extensions = getExtensions();
for (ExtensionInfo extension : extensions) {
+ Collection<ServiceInfo> services = extension.getServices();
for (ServiceInfo service : services) {
if (service.getName().equals(serviceName))
return extension;
@@ -209,6 +211,24 @@ public class StackInfo implements Comparable<StackInfo>, Validable{
return null;
}
+ public void addExtension(ExtensionInfo extension) {
+ Collection<ExtensionInfo> extensions = getExtensions();
+ extensions.add(extension);
+ Collection<ServiceInfo> services = getServices();
+ for (ServiceInfo service : extension.getServices()) {
+ services.add(service);
+ }
+ }
+
+ public void removeExtension(ExtensionInfo extension) {
+ Collection<ExtensionInfo> extensions = getExtensions();
+ extensions.remove(extension);
+ Collection<ServiceInfo> services = getServices();
+ for (ServiceInfo service : extension.getServices()) {
+ services.remove(service);
+ }
+ }
+
public List<PropertyInfo> getProperties() {
if (properties == null) properties = new ArrayList<>();
return properties;
@@ -476,9 +496,10 @@ public class StackInfo implements Comparable<StackInfo>, Validable{
@Override
public int compareTo(StackInfo o) {
- String myId = name + "-" + version;
- String oId = o.name + "-" + o.version;
- return myId.compareTo(oId);
+ if (name.equals(o.name)) {
+ return VersionUtils.compareVersions(version, o.version);
+ }
+ return name.compareTo(o.name);
}
//todo: ensure that required properties are never modified...
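
Rough sketch (illustrative only) of the linking behavior added to StackInfo: addExtension() makes the extension's services visible through the stack's own service list, and removeExtension() takes them back out. StackManager.linkStackToExtension() and unlinkStackAndExtension() now delegate to these methods.

    import org.apache.ambari.server.state.ExtensionInfo;
    import org.apache.ambari.server.state.StackInfo;

    public class ExtensionLinkingSketch {
      // Illustrative only: assumes stack and extension were already parsed by
      // the StackManager.
      public static void toggleLink(StackInfo stack, ExtensionInfo extension) {
        stack.addExtension(extension);     // extension services now appear in stack.getServices()
        System.out.println("services after link: " + stack.getServices().size());
        stack.removeExtension(extension);  // extension services are removed again
        System.out.println("services after unlink: " + stack.getServices().size());
      }
    }
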
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/ExtensionMetainfoXml.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/ExtensionMetainfoXml.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/ExtensionMetainfoXml.java
index 790e514..26572e5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/ExtensionMetainfoXml.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/ExtensionMetainfoXml.java
@@ -77,6 +77,9 @@ public class ExtensionMetainfoXml implements Validable{
@XmlTransient
private boolean valid = true;
+ @XmlElement(name="auto-link")
+ private boolean autoLink = false;
+
/**
*
* @return valid xml flag
@@ -201,4 +204,12 @@ public class ExtensionMetainfoXml implements Validable{
}
}
+ public boolean isAutoLink() {
+ return autoLink;
+ }
+
+ public void setAutoLink(boolean autoLink) {
+ this.autoLink = autoLink;
+ }
+
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerCommonServicesTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerCommonServicesTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerCommonServicesTest.java
index 6503e7f..09a934e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerCommonServicesTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerCommonServicesTest.java
@@ -35,6 +35,7 @@ import java.util.List;
import java.util.Map;
import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariManagementHelper;
import org.apache.ambari.server.metadata.ActionMetadata;
import org.apache.ambari.server.orm.dao.ExtensionDAO;
import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
@@ -123,10 +124,11 @@ public class StackManagerCommonServicesTest {
osFamily = new OsFamily(config);
replay(metaInfoDao, actionMetadata);
+ AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
StackManager stackManager = new StackManager(new File(stackRoot), new File(
commonServicesRoot), new File(extensionRoot), osFamily, true, metaInfoDao,
- actionMetadata, stackDao, extensionDao, linkDao);
+ actionMetadata, stackDao, extensionDao, linkDao, helper);
EasyMock.verify( config, stackDao );
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
index 8165398..4ae52c0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
@@ -34,6 +34,7 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Map;
+import org.apache.ambari.server.controller.AmbariManagementHelper;
import org.apache.ambari.server.metadata.ActionMetadata;
import org.apache.ambari.server.orm.dao.ExtensionDAO;
import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
@@ -63,42 +64,57 @@ public class StackManagerExtensionTest {
ExtensionLinkDAO linkDao = createNiceMock(ExtensionLinkDAO.class);
ActionMetadata actionMetadata = createNiceMock(ActionMetadata.class);
OsFamily osFamily = createNiceMock(OsFamily.class);
- StackEntity stackEntity = createNiceMock(StackEntity.class);
- ExtensionEntity extensionEntity = createNiceMock(ExtensionEntity.class);
- ExtensionLinkEntity linkEntity = createNiceMock(ExtensionLinkEntity.class);
- List<ExtensionLinkEntity> list = new ArrayList<ExtensionLinkEntity>();
- list.add(linkEntity);
-
- expect(
- stackDao.find(EasyMock.anyObject(String.class),
- EasyMock.anyObject(String.class))).andReturn(stackEntity).atLeastOnce();
-
- expect(
- extensionDao.find(EasyMock.anyObject(String.class),
- EasyMock.anyObject(String.class))).andReturn(extensionEntity).atLeastOnce();
-
- expect(
- linkDao.findByStack(EasyMock.anyObject(String.class),
+ StackEntity stack1 = new StackEntity();
+ stack1.setStackName("HDP");
+ stack1.setStackVersion("0.1");
+ StackEntity stack2 = new StackEntity();
+ stack2.setStackName("HDP");
+ stack2.setStackVersion("0.2");
+ StackEntity stack3 = new StackEntity();
+ stack3.setStackName("HDP");
+ stack3.setStackVersion("0.3");
+ ExtensionEntity extension1 = new ExtensionEntity();
+ extension1.setExtensionName("EXT");
+ extension1.setExtensionVersion("0.1");
+ ExtensionEntity extension2 = new ExtensionEntity();
+ extension2.setExtensionName("EXT");
+ extension2.setExtensionVersion("0.2");
+ ExtensionEntity extension3 = new ExtensionEntity();
+ extension3.setExtensionName("EXT");
+ extension3.setExtensionVersion("0.3");
+ List<ExtensionLinkEntity> list = new ArrayList<>();
+
+ expect(stackDao.find("HDP", "0.1")).andReturn(stack1).atLeastOnce();
+ expect(stackDao.find("HDP", "0.2")).andReturn(stack2).atLeastOnce();
+ expect(stackDao.find("HDP", "0.3")).andReturn(stack3).atLeastOnce();
+ expect(extensionDao.find("EXT", "0.1")).andReturn(extension1).atLeastOnce();
+ expect(extensionDao.find("EXT", "0.2")).andReturn(extension2).atLeastOnce();
+ expect(extensionDao.find("EXT", "0.3")).andReturn(extension3).atLeastOnce();
+
+ expect(linkDao.findByStack(EasyMock.anyObject(String.class),
EasyMock.anyObject(String.class))).andReturn(list).atLeastOnce();
- expect(
- linkEntity.getExtension()).andReturn(extensionEntity).atLeastOnce();
+ expect(linkDao.findByStackAndExtension("HDP", "0.2", "EXT", "0.2")).andReturn(null).atLeastOnce();
- expect(
- extensionEntity.getExtensionName()).andReturn("EXT").atLeastOnce();
-
- expect(
- extensionEntity.getExtensionVersion()).andReturn("0.2").atLeastOnce();
-
- replay(actionMetadata, stackDao, metaInfoDao, osFamily, extensionDao, linkDao, extensionEntity, linkEntity);
+ replay(actionMetadata, stackDao, metaInfoDao, osFamily, extensionDao, linkDao); //linkEntity
String stacks = ClassLoader.getSystemClassLoader().getResource("stacks_with_extensions").getPath();
String common = ClassLoader.getSystemClassLoader().getResource("common-services").getPath();
String extensions = ClassLoader.getSystemClassLoader().getResource("extensions").getPath();
- StackManager stackManager = new StackManager(new File(stacks),
+ AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
+
+ StackManager stackManager = null;
+ try {
+ stackManager = new StackManager(new File(stacks),
new File(common), new File(extensions), osFamily, false,
- metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+ metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
+ }
+ catch (Exception e) {
+ e.printStackTrace();
+ }
+
+ assertNotNull("Failed to create Stack Manager", stackManager);
ExtensionInfo extension = stackManager.getExtension("EXT", "0.1");
assertNull("EXT 0.1's parent: " + extension.getParentExtensionVersion(), extension.getParentExtensionVersion());
@@ -123,6 +139,7 @@ public class StackManagerExtensionTest {
assertNotNull("EXT 0.2's parent: " + extension.getParentExtensionVersion(), extension.getParentExtensionVersion());
assertEquals("EXT 0.2's parent: " + extension.getParentExtensionVersion(), "0.1", extension.getParentExtensionVersion());
assertNotNull(extension.getService("OOZIE2"));
+ assertTrue("Extension is not set to auto link", extension.isAutoLink());
oozie = extension.getService("OOZIE2");
assertNotNull("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder());
assertTrue("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder().contains("extensions/EXT/0.1/services/OOZIE2/package"));
@@ -147,7 +164,13 @@ public class StackManagerExtensionTest {
assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
extension = stack.getExtensions().iterator().next();
assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
- assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.2");
+ assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
+
+ stack = stackManager.getStack("HDP", "0.3");
+ assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
+ extension = stack.getExtensions().iterator().next();
+ assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
+ assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMiscTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMiscTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMiscTest.java
index ca24cd9..6df46c3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMiscTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMiscTest.java
@@ -32,6 +32,7 @@ import java.util.Collections;
import java.util.List;
import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.AmbariManagementHelper;
import org.apache.ambari.server.metadata.ActionMetadata;
import org.apache.ambari.server.orm.dao.ExtensionDAO;
import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
@@ -69,12 +70,13 @@ public class StackManagerMiscTest {
EasyMock.anyObject(String.class))).andReturn(list).atLeastOnce();
replay(actionMetadata, stackDao, extensionDao, linkDao, metaInfoDao, osFamily);
+ AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
try {
String stacksCycle1 = ClassLoader.getSystemClassLoader().getResource("stacks_with_cycle").getPath();
StackManager stackManager = new StackManager(new File(stacksCycle1), null, null, osFamily, false,
- metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+ metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
fail("Expected exception due to cyclic stack");
} catch (AmbariException e) {
@@ -86,7 +88,7 @@ public class StackManagerMiscTest {
"stacks_with_cycle2").getPath();
StackManager stackManager = new StackManager(new File(stacksCycle2),
- null, null, osFamily, true, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+ null, null, osFamily, true, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
fail("Expected exception due to cyclic stack");
} catch (AmbariException e) {
@@ -124,10 +126,11 @@ public class StackManagerMiscTest {
replay(metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata, osFamily);
String singleStack = ClassLoader.getSystemClassLoader().getResource("single_stack").getPath();
+ AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
StackManager stackManager = new StackManager(new File(singleStack.replace(
StackManager.PATH_DELIMITER, File.separator)), null, null, osFamily, false, metaInfoDao,
- actionMetadata, stackDao, extensionDao, linkDao);
+ actionMetadata, stackDao, extensionDao, linkDao, helper);
Collection<StackInfo> stacks = stackManager.getStacks();
assertEquals(1, stacks.size());
@@ -161,11 +164,13 @@ public class StackManagerMiscTest {
replay(metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata, osFamily);
+ AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
+
try {
String upgradeCycle = ClassLoader.getSystemClassLoader().getResource("stacks_with_upgrade_cycle").getPath();
StackManager stackManager = new StackManager(new File(upgradeCycle), null, null, osFamily, false,
- metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+ metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
fail("Expected exception due to cyclic service upgrade xml");
} catch (AmbariException e) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMock.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMock.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMock.java
index 1b9e15f..4e7d040 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMock.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMock.java
@@ -26,6 +26,7 @@ import java.util.concurrent.locks.ReentrantLock;
import javax.annotation.Nullable;
import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.AmbariManagementHelper;
import org.apache.ambari.server.metadata.ActionMetadata;
import org.apache.ambari.server.orm.dao.ExtensionDAO;
import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
@@ -133,8 +134,8 @@ public class StackManagerMock extends StackManager {
File commonServicesRoot, @Assisted("extensionRoot") @Nullable File extensionRoot,
@Assisted OsFamily osFamily, @Assisted boolean validate, MetainfoDAO metaInfoDAO,
ActionMetadata actionMetadata, StackDAO stackDao, ExtensionDAO extensionDao,
- ExtensionLinkDAO linkDao) throws AmbariException {
- super(stackRoot, commonServicesRoot, extensionRoot, osFamily, validate, metaInfoDAO, actionMetadata, stackDao, extensionDao, linkDao);
+ ExtensionLinkDAO linkDao, AmbariManagementHelper helper) throws AmbariException {
+ super(stackRoot, commonServicesRoot, extensionRoot, osFamily, validate, metaInfoDAO, actionMetadata, stackDao, extensionDao, linkDao, helper);
currentStackRoot = stackRoot;
currentCommonServicesRoot = commonServicesRoot;
currentExtensionRoot = extensionRoot;
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
index 090bf55..74a8f29 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
@@ -44,6 +44,7 @@ import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.Role;
import org.apache.ambari.server.RoleCommand;
import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariManagementHelper;
import org.apache.ambari.server.metadata.ActionMetadata;
import org.apache.ambari.server.orm.dao.ExtensionDAO;
import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
@@ -119,9 +120,10 @@ public class StackManagerTest {
replay(config, metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata);
osFamily = new OsFamily(config);
+ AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
StackManager stackManager = new StackManager(new File(stackRoot), null, null, osFamily, false,
- metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+ metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
verify(config, metaInfoDao, stackDao, actionMetadata);
@@ -778,9 +780,10 @@ public class StackManagerTest {
replay(config, metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata);
OsFamily osFamily = new OsFamily(config);
+ AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
StackManager stackManager = new StackManager(stackRoot, commonServices, extensions,
- osFamily, false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+ osFamily, false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
for (StackInfo stackInfo : stackManager.getStacks()) {
for (ServiceInfo serviceInfo : stackInfo.getServices()) {
@@ -843,9 +846,10 @@ public class StackManagerTest {
replay(config, metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata);
OsFamily osFamily = new OsFamily(config);
+ AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
StackManager stackManager = new StackManager(stackRoot, commonServices, extensions, osFamily,
- false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+ false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
String rangerUserSyncRoleCommand = Role.RANGER_USERSYNC + "-" + RoleCommand.START;
String rangerAdminRoleCommand = Role.RANGER_ADMIN + "-" + RoleCommand.START;
@@ -972,9 +976,10 @@ public class StackManagerTest {
replay(config, metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata);
OsFamily osFamily = new OsFamily(config);
+ AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
StackManager stackManager = new StackManager(stackRoot, commonServices, extensions, osFamily,
- false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+ false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
String zookeeperServerRoleCommand = Role.ZOOKEEPER_SERVER + "-" + RoleCommand.START;
String logsearchServerRoleCommand = Role.LOGSEARCH_SERVER + "-" + RoleCommand.START;
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/extensions/EXT/0.1/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.1/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.1/metainfo.xml
index 77a832c..27f5902 100644
--- a/ambari-server/src/test/resources/extensions/EXT/0.1/metainfo.xml
+++ b/ambari-server/src/test/resources/extensions/EXT/0.1/metainfo.xml
@@ -23,7 +23,7 @@
<min-stack-versions>
<stack>
<name>HDP</name>
- <version>0.1.*</version>
+ <version>0.1</version>
</stack>
</min-stack-versions>
</prerequisites>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
index 04f733c..0d37b3e 100644
--- a/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
+++ b/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
@@ -20,11 +20,12 @@
<active>true</active>
</versions>
<extends>0.1</extends>
+ <auto-link>true</auto-link>
<prerequisites>
<min-stack-versions>
<stack>
<name>HDP</name>
- <version>0.2.*</version>
+ <version>0.2</version>
</stack>
</min-stack-versions>
</prerequisites>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
new file mode 100644
index 0000000..d827314
--- /dev/null
+++ b/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <versions>
+ <active>true</active>
+ </versions>
+ <extends>0.2</extends>
+ <auto-link>true</auto-link>
+ <prerequisites>
+ <min-stack-versions>
+ <stack>
+ <name>HDP</name>
+ <version>0.2</version>
+ </stack>
+ </min-stack-versions>
+ </prerequisites>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/metainfo.xml
new file mode 100644
index 0000000..9176551
--- /dev/null
+++ b/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/metainfo.xml
@@ -0,0 +1,118 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>OOZIE2</name>
+ <comment>System for workflow coordination and execution of Apache Hadoop jobs</comment>
+ <version>4.0.0</version>
+
+ <components>
+ <component>
+ <name>OOZIE2_SERVER</name>
+ <category>MASTER</category>
+ <cardinality>1</cardinality>
+ <dependencies>
+ <dependency>
+ <name>HDFS/HDFS_CLIENT</name>
+ <scope>host</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ <dependency>
+ <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+ <scope>host</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ </dependencies>
+ <commandScript>
+ <script>scripts/oozie2_server.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+
+ <component>
+ <name>OOZIE2_CLIENT</name>
+ <category>CLIENT</category>
+ <cardinality>0+</cardinality>
+ <dependencies>
+ <dependency>
+ <name>HDFS/HDFS_CLIENT</name>
+ <scope>host</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ <dependency>
+ <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+ <scope>host</scope>
+ <auto-deploy>
+ <enabled>true</enabled>
+ </auto-deploy>
+ </dependency>
+ </dependencies>
+ <commandScript>
+ <script>scripts/oozie2_client.py</script>
+ <scriptType>PYTHON</scriptType>
+ </commandScript>
+ </component>
+ </components>
+
+ <osSpecifics>
+ <osSpecific>
+ <osFamily>any</osFamily>
+ <packages>
+ <package>
+ <name>oozie2.noarch</name>
+ </package>
+ <package>
+ <name>oozie2-client.noarch</name>
+ </package>
+ <package>
+ <name>extjs-2.2-1</name>
+ </package>
+ </packages>
+ </osSpecific>
+ </osSpecifics>
+
+ <commandScript>
+ <script>scripts/service_check.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>300</timeout>
+ </commandScript>
+
+ <configuration-dependencies>
+ <config-type>global</config-type>
+ <config-type>oozie2-site</config-type>
+ </configuration-dependencies>
+
+ <themes>
+ <theme>
+ <fileName>broken_theme.json</fileName>
+ <default>true</default>
+ </theme>
+ </themes>
+
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/themes/broken_theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/themes/broken_theme.json b/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/themes/broken_theme.json
new file mode 100644
index 0000000..6e8b5bf
--- /dev/null
+++ b/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/themes/broken_theme.json
@@ -0,0 +1,3 @@
+{
+ "configuration": {
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/metainfo.xml
new file mode 100644
index 0000000..b52857b
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/metainfo.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <versions>
+ <upgrade>0.2</upgrade>
+ </versions>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/repos/repoinfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/repos/repoinfo.xml
new file mode 100644
index 0000000..9b3b1c7
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/repos/repoinfo.xml
@@ -0,0 +1,63 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<reposinfo>
+ <os family="redhat6">
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6</baseurl>
+ <repoid>HDP-1.1.1.16</repoid>
+ <reponame>HDP</reponame>
+ <unique>true</unique>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6</baseurl>
+ <repoid>HDP-UTILS-1.1.0.15</repoid>
+ <reponame>HDP-UTILS</reponame>
+ <unique>false</unique>
+ <mirrorslist></mirrorslist>
+ </repo>
+ <repo>
+ <baseurl></baseurl>
+ <repoid>epel</repoid>
+ <reponame>epel</reponame>
+ <unique>true</unique>
+ <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch]]></mirrorslist>
+ </repo>
+ </os>
+ <os family="centos5">
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5</baseurl>
+ <repoid>HDP-1.1.1.16</repoid>
+ <reponame>HDP</reponame>
+ <unique>true</unique>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5</baseurl>
+ <repoid>HDP-UTILS-1.1.0.15</repoid>
+ <reponame>HDP-UTILS</reponame>
+ <unique>false</unique>
+ <mirrorslist></mirrorslist>
+ </repo>
+ <repo>
+ <baseurl></baseurl>
+ <repoid>epel</repoid>
+ <reponame>epel</reponame>
+ <unique>true</unique>
+ <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch]]></mirrorslist>
+ </repo>
+ </os>
+</reposinfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HBASE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HBASE/metainfo.xml
new file mode 100644
index 0000000..48123f0
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HBASE/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>HBASE</name>
+ <extends>common-services/HBASE/1.0</extends>
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/global.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/global.xml
new file mode 100644
index 0000000..bcab577
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/global.xml
@@ -0,0 +1,145 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+ <property>
+ <name>namenode_host</name>
+ <value></value>
+ <description>NameNode Host.</description>
+ </property>
+ <property>
+ <name>dfs_name_dir</name>
+ <value>/hadoop/hdfs/namenode</value>
+ <description>NameNode Directories.</description>
+ </property>
+ <property>
+ <name>snamenode_host</name>
+ <value></value>
+ <description>Secondary NameNode.</description>
+ </property>
+ <property>
+ <name>fs_checkpoint_dir</name>
+ <value>/hadoop/hdfs/namesecondary</value>
+ <description>Secondary NameNode checkpoint dir.</description>
+ </property>
+ <property>
+ <name>datanode_hosts</name>
+ <value></value>
+ <description>List of Datanode Hosts.</description>
+ </property>
+ <property>
+ <name>dfs_data_dir</name>
+ <value>/hadoop/hdfs/data</value>
+ <description>Data directories for Data Nodes.</description>
+ </property>
+ <property>
+ <name>hdfs_log_dir_prefix</name>
+ <value>/var/log/hadoop</value>
+ <description>Hadoop Log Dir Prefix</description>
+ </property>
+ <property>
+ <name>hadoop_pid_dir_prefix</name>
+ <value>/var/run/hadoop</value>
+ <description>Hadoop PID Dir Prefix</description>
+ </property>
+ <property>
+ <name>dfs_webhdfs_enabled</name>
+ <value>true</value>
+ <description>WebHDFS enabled</description>
+ </property>
+ <property>
+ <name>hadoop_heapsize</name>
+ <value>1024</value>
+ <description>Hadoop maximum Java heap size</description>
+ </property>
+ <property>
+ <name>namenode_heapsize</name>
+ <value>1024</value>
+ <description>NameNode Java heap size</description>
+ </property>
+ <property>
+ <name>namenode_opt_newsize</name>
+ <value>200</value>
+ <description>Default size of Java new generation for NameNode (Java option -XX:NewSize) Note: The value of NameNode new generation size (default size of Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize property is 1/8 the value of maximum heap size (-Xmx).</description>
+ </property>
+ <property>
+ <name>namenode_opt_maxnewsize</name>
+ <value>640</value>
+ <description>NameNode maximum new generation size</description>
+ </property>
+ <property>
+ <name>namenode_opt_permsize</name>
+ <value>128</value>
+ <description>NameNode permanent generation size</description>
+ </property>
+ <property>
+ <name>namenode_opt_maxpermsize</name>
+ <value>256</value>
+ <description>NameNode maximum permanent generation size</description>
+ </property>
+ <property>
+ <name>datanode_du_reserved</name>
+ <value>1</value>
+ <description>Reserved space for HDFS</description>
+ </property>
+ <property>
+ <name>dtnode_heapsize</name>
+ <value>1024</value>
+ <description>DataNode maximum Java heap size</description>
+ </property>
+ <property>
+ <name>dfs_datanode_failed_volume_tolerated</name>
+ <value>0</value>
+ <description>DataNode volumes failure toleration</description>
+ </property>
+ <property>
+ <name>fs_checkpoint_period</name>
+ <value>21600</value>
+ <description>HDFS Maximum Checkpoint Delay</description>
+ </property>
+ <property>
+ <name>fs_checkpoint_size</name>
+ <value>0.5</value>
+ <description>FS Checkpoint Size.</description>
+ </property>
+ <property>
+ <name>security_enabled</name>
+ <value>false</value>
+ <description>Hadoop Security</description>
+ </property>
+ <property>
+ <name>kerberos_domain</name>
+ <value>EXAMPLE.COM</value>
+ <description>Kerberos realm.</description>
+ </property>
+ <property>
+ <name>kerberos_domain</name>
+ <value>EXAMPLE.COM</value>
+ <description>Kerberos realm.</description>
+ </property>
+ <property>
+ <name>keytab_path</name>
+ <value>/etc/security/keytabs</value>
+ <description>KeyTab Directory.</description>
+ </property>
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..8fb8c7f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hadoop-env.xml
@@ -0,0 +1,223 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+ <property>
+ <name>hdfs_log_dir_prefix</name>
+ <value>/var/log/hadoop</value>
+ <description>Hadoop Log Dir Prefix</description>
+ </property>
+ <property>
+ <name>hadoop_pid_dir_prefix</name>
+ <value>/var/run/hadoop</value>
+ <description>Hadoop PID Dir Prefix</description>
+ </property>
+ <property>
+ <name>hadoop_heapsize</name>
+ <value>1024</value>
+ <description>Hadoop maximum Java heap size</description>
+ </property>
+ <property>
+ <name>namenode_heapsize</name>
+ <value>1024</value>
+ <description>NameNode Java heap size</description>
+ </property>
+ <property>
+ <name>namenode_opt_newsize</name>
+ <value>200</value>
+ <description>Default size of Java new generation for NameNode (Java option -XX:NewSize) Note: The value of NameNode new generation size (default size of Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize property is 1/8 the value of maximum heap size (-Xmx).</description>
+ </property>
+ <property>
+ <name>namenode_opt_maxnewsize</name>
+ <value>200</value>
+ <description>NameNode maximum new generation size</description>
+ </property>
+ <property>
+ <name>namenode_opt_permsize</name>
+ <value>128</value>
+ <description>NameNode permanent generation size</description>
+ </property>
+ <property>
+ <name>namenode_opt_maxpermsize</name>
+ <value>256</value>
+ <description>NameNode maximum permanent generation size</description>
+ </property>
+ <property>
+ <name>dtnode_heapsize</name>
+ <value>1024</value>
+ <description>DataNode maximum Java heap size</description>
+ </property>
+ <property>
+ <name>proxyuser_group</name>
+ <value>users</value>
+ <description>Proxy user group.</description>
+ </property>
+ <property>
+ <name>security_enabled</name>
+ <value>false</value>
+ <description>Hadoop Security</description>
+ </property>
+ <property>
+ <name>kerberos_domain</name>
+ <value>EXAMPLE.COM</value>
+ <description>Kerberos realm.</description>
+ </property>
+ <property>
+ <name>hdfs_user</name>
+ <value>hdfs</value>
+ <description>User and Groups.</description>
+ </property>
+ <property>
+ <name>ignore_groupsusers_create</name>
+ <value>false</value>
+ <description>Whether to ignores failures on users and group creation</description>
+ </property>
+ <property>
+ <name>smokeuser</name>
+ <value>ambari-qa</value>
+ <description>User executing service checks</description>
+ </property>
+ <property>
+ <name>user_group</name>
+ <value>hadoop</value>
+ <description>Proxy user group.</description>
+ </property>
+
+ <!-- hadoop-env.sh -->
+ <property>
+ <name>content</name>
+ <description>hadoop-env.sh content</description>
+ <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME. All others are
+# optional. When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use. Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options. Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:ConcGCThreads=4 -XX:+UseConcMarkSweepGC -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER={{hdfs_user}}
+
+# Extra ssh options. Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored. $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from. Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+ JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+ JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+ export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+ </value>
+ </property>
+
+</configuration>