Posted to commits@ambari.apache.org by yu...@apache.org on 2014/01/29 04:32:24 UTC

[1/3] AMBARI-4448. Some of the configuration parameters do not have description. (jaimin via yusaku)

Updated Branches:
  refs/heads/trunk 4a008bdc6 -> 8017fec53


http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-site.xml
index bf4533f..c149fa4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-site.xml
@@ -15,299 +15,315 @@
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
--->     
+-->
 
 <configuration>
 
-<!--
-    Refer to the oozie-default.xml file for the complete list of
-    Oozie configuration properties and their default values.
--->
+  <!--
+      Refer to the oozie-default.xml file for the complete list of
+      Oozie configuration properties and their default values.
+  -->
   <property>
     <name>oozie.base.url</name>
     <value>http://localhost:11000/oozie</value>
     <description>Base Oozie URL.</description>
-   </property>
+  </property>
 
   <property>
     <name>oozie.system.id</name>
     <value>oozie-${user.name}</value>
     <description>
-    The Oozie system ID.
+      The Oozie system ID.
     </description>
-   </property>
+  </property>
 
-   <property>
-     <name>oozie.systemmode</name>
-     <value>NORMAL</value>
-     <description>
-     System mode for  Oozie at startup.
-     </description>
-   </property>
+  <property>
+    <name>oozie.systemmode</name>
+    <value>NORMAL</value>
+    <description>
+      System mode for Oozie at startup.
+    </description>
+  </property>
 
-   <property>
-     <name>oozie.service.AuthorizationService.security.enabled</name>
-     <value>true</value>
-     <description>
-     Specifies whether security (user name/admin role) is enabled or not.
-     If disabled any user can manage Oozie system and manage any job.
-     </description>
-   </property>
+  <property>
+    <name>oozie.service.AuthorizationService.security.enabled</name>
+    <value>true</value>
+    <description>
+      Specifies whether security (user name/admin role) is enabled or not.
+      If disabled any user can manage Oozie system and manage any job.
+    </description>
+  </property>
 
-   <property>
-     <name>oozie.service.PurgeService.older.than</name>
-     <value>30</value>
-     <description>
-     Jobs older than this value, in days, will be purged by the PurgeService.
-     </description>
-   </property>
+  <property>
+    <name>oozie.service.PurgeService.older.than</name>
+    <value>30</value>
+    <description>
+      Jobs older than this value, in days, will be purged by the PurgeService.
+    </description>
+  </property>
 
-   <property>
-     <name>oozie.service.PurgeService.purge.interval</name>
-     <value>3600</value>
-     <description>
-     Interval at which the purge service will run, in seconds.
-     </description>
-   </property>
+  <property>
+    <name>oozie.service.PurgeService.purge.interval</name>
+    <value>3600</value>
+    <description>
+      Interval at which the purge service will run, in seconds.
+    </description>
+  </property>
 
-   <property>
-     <name>oozie.service.CallableQueueService.queue.size</name>
-     <value>1000</value>
-     <description>Max callable queue size</description>
-   </property>
+  <property>
+    <name>oozie.service.CallableQueueService.queue.size</name>
+    <value>1000</value>
+    <description>Max callable queue size</description>
+  </property>
 
-   <property>
-     <name>oozie.service.CallableQueueService.threads</name>
-     <value>10</value>
-     <description>Number of threads used for executing callables</description>
-   </property>
+  <property>
+    <name>oozie.service.CallableQueueService.threads</name>
+    <value>10</value>
+    <description>Number of threads used for executing callables</description>
+  </property>
 
-   <property>
-     <name>oozie.service.CallableQueueService.callable.concurrency</name>
-     <value>3</value>
-     <description>
-     Maximum concurrency for a given callable type.
-     Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).
-     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-     All commands that use action executors (action-start, action-end, action-kill and action-check) use
-     the action type as the callable type.
-     </description>
-   </property>
+  <property>
+    <name>oozie.service.CallableQueueService.callable.concurrency</name>
+    <value>3</value>
+    <description>
+      Maximum concurrency for a given callable type.
+      Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).
+      Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
+      All commands that use action executors (action-start, action-end, action-kill and action-check) use
+      the action type as the callable type.
+    </description>
+  </property>
 
-   <property>
-     <name>oozie.service.coord.normal.default.timeout</name>
-     <value>120</value>
-     <description>Default timeout for a coordinator action input check (in minutes) for normal job.
-      -1 means infinite timeout</description>
-   </property>
+  <property>
+    <name>oozie.service.coord.normal.default.timeout</name>
+    <value>120</value>
+    <description>Default timeout for a coordinator action input check (in minutes) for normal job.
+      -1 means infinite timeout
+    </description>
+  </property>
 
-   <property>
-     <name>oozie.db.schema.name</name>
-     <value>oozie</value>
-     <description>
+  <property>
+    <name>oozie.db.schema.name</name>
+    <value>oozie</value>
+    <description>
       Oozie DataBase Name
-     </description>
-   </property>
+    </description>
+  </property>
 
-    <property>
-      <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-      <value> </value>
-      <description>
+  <property>
+    <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
+    <value></value>
+    <description>
       Whitelisted job tracker for Oozie service.
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.authentication.type</name>
-      <value>simple</value>
-      <description>
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-      <value> </value>
-      <description>
-      </description>
-    </property>
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.authentication.type</name>
+    <value>simple</value>
+    <description>
+      Authentication used for Oozie HTTP endpoint, the supported values are: simple | kerberos |
+      #AUTHENTICATION_HANDLER_CLASSNAME#.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
+    <value></value>
+    <description>
+      Whitelisted NameNode for Oozie service.
+    </description>
+  </property>
 
-    <property>
-      <name>oozie.service.WorkflowAppService.system.libpath</name>
-      <value>/user/${user.name}/share/lib</value>
-      <description>
+  <property>
+    <name>oozie.service.WorkflowAppService.system.libpath</name>
+    <value>/user/${user.name}/share/lib</value>
+    <description>
       System library path to use for workflow applications.
       This path is added to workflow application if their job properties sets
       the property 'oozie.use.system.libpath' to true.
-      </description>
-    </property>
+    </description>
+  </property>
 
-    <property>
-      <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-      <value>false</value>
-      <description>
+  <property>
+    <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
+    <value>false</value>
+    <description>
       If set to true, submissions of MapReduce and Pig jobs will include
       automatically the system library path, thus not requiring users to
       specify where the Pig JAR files are. Instead, the ones from the system
       library path are used.
-      </description>
-    </property>
-    <property>
-      <name>oozie.authentication.kerberos.name.rules</name>
-      <value>
-        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
-        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
-        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        DEFAULT
-        </value>
-      <description>The mapping from kerberos principal names to local OS user names.</description>
-    </property>
-    <property>
-      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-      <value>*=/etc/hadoop/conf</value>
-      <description>
-          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-          the relevant Hadoop *-site.xml files. If the path is relative is looked within
-          the Oozie configuration directory; though the path can be absolute (i.e. to point
-          to Hadoop client conf/ directories in the local filesystem.
-      </description>
-    </property>
-    <property>
-        <name>oozie.service.ActionService.executor.ext.classes</name>
-        <value>
-            org.apache.oozie.action.email.EmailActionExecutor,
-            org.apache.oozie.action.hadoop.HiveActionExecutor,
-            org.apache.oozie.action.hadoop.ShellActionExecutor,
-            org.apache.oozie.action.hadoop.SqoopActionExecutor,
-            org.apache.oozie.action.hadoop.DistcpActionExecutor
-        </value>
-    </property>
+    </description>
+  </property>
+  <property>
+    <name>oozie.authentication.kerberos.name.rules</name>
+    <value>
+      RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
+      RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
+      RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+      RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+      DEFAULT
+    </value>
+    <description>The mapping from kerberos principal names to local OS user names.</description>
+  </property>
+  <property>
+    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
+    <value>*=/etc/hadoop/conf</value>
+    <description>
+      Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
+      the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
+      the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
+      the Oozie configuration directory; though the path can be absolute (i.e. to point
+      to Hadoop client conf/ directories in the local filesystem).
+    </description>
+  </property>
+  <property>
+    <name>oozie.service.ActionService.executor.ext.classes</name>
+    <value>
+      org.apache.oozie.action.email.EmailActionExecutor,
+      org.apache.oozie.action.hadoop.HiveActionExecutor,
+      org.apache.oozie.action.hadoop.ShellActionExecutor,
+      org.apache.oozie.action.hadoop.SqoopActionExecutor,
+      org.apache.oozie.action.hadoop.DistcpActionExecutor
+    </value>
+    <description>
+      List of ActionExecutors extension classes (separated by commas). Only action types with associated executors can
+      be used in workflows. This property is a convenience property to add extensions to the built in executors without
+      having to include all the built in ones.
+    </description>
+  </property>
 
-    <property>
-        <name>oozie.service.SchemaService.wf.ext.schemas</name>
-        <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd</value>
-    </property>
-    <property>
-        <name>oozie.service.JPAService.create.db.schema</name>
-        <value>false</value>
-        <description>
-            Creates Oozie DB.
+  <property>
+    <name>oozie.service.SchemaService.wf.ext.schemas</name>
+    <value>
+      shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd
+    </value>
+    <description>
+      Schemas for additional action types. IMPORTANT: if there are no schemas leave a 1 space string, the service
+      trims the value, if empty Configuration assumes it is NULL.
+    </description>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.create.db.schema</name>
+    <value>false</value>
+    <description>
+      Creates Oozie DB.
 
-            If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.
-            If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.
-        </description>
-    </property>
+      If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.
+      If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.
+    </description>
+  </property>
 
-    <property>
-        <name>oozie.service.JPAService.jdbc.driver</name>
-        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-        <description>
-            JDBC driver class.
-        </description>
-    </property>
+  <property>
+    <name>oozie.service.JPAService.jdbc.driver</name>
+    <value>org.apache.derby.jdbc.EmbeddedDriver</value>
+    <description>
+      JDBC driver class.
+    </description>
+  </property>
 
-    <property>
-        <name>oozie.service.JPAService.jdbc.url</name>
-        <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-        <description>
-            JDBC URL.
-        </description>
-    </property>
+  <property>
+    <name>oozie.service.JPAService.jdbc.url</name>
+    <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
+    <description>
+      JDBC URL.
+    </description>
+  </property>
 
-    <property>
-        <name>oozie.service.JPAService.jdbc.username</name>
-        <value>oozie</value>
-        <description>
-          Database user name to use to connect to the database
-        </description>
-    </property>
+  <property>
+    <name>oozie.service.JPAService.jdbc.username</name>
+    <value>oozie</value>
+    <description>
+      Database user name to use to connect to the database
+    </description>
+  </property>
 
-    <property>
-        <name>oozie.service.JPAService.jdbc.password</name>
-        <value> </value>
-        <description>
-            DB user password.
+  <property>
+    <name>oozie.service.JPAService.jdbc.password</name>
+    <value></value>
+    <description>
+      DB user password.
 
-            IMPORTANT: if password is emtpy leave a 1 space string, the service trims the value,
-                       if empty Configuration assumes it is NULL.
-        </description>
-    </property>
+      IMPORTANT: if password is empty leave a 1 space string, the service trims the value,
+      if empty Configuration assumes it is NULL.
+    </description>
+  </property>
 
-    <property>
-        <name>oozie.service.JPAService.pool.max.active.conn</name>
-        <value>10</value>
-        <description>
-             Max number of connections.
-        </description>
-    </property>
+  <property>
+    <name>oozie.service.JPAService.pool.max.active.conn</name>
+    <value>10</value>
+    <description>
+      Max number of connections.
+    </description>
+  </property>
 
-    <property>
-      <name>oozie.services</name>
-      <value>
-        org.apache.oozie.service.SchedulerService,
-        org.apache.oozie.service.InstrumentationService,
-        org.apache.oozie.service.CallableQueueService,
-        org.apache.oozie.service.UUIDService,
-        org.apache.oozie.service.ELService,
-        org.apache.oozie.service.AuthorizationService,
-        org.apache.oozie.service.UserGroupInformationService,
-        org.apache.oozie.service.HadoopAccessorService,
-        org.apache.oozie.service.URIHandlerService,
-        org.apache.oozie.service.MemoryLocksService,
-        org.apache.oozie.service.DagXLogInfoService,
-        org.apache.oozie.service.SchemaService,
-        org.apache.oozie.service.LiteWorkflowAppService,
-        org.apache.oozie.service.JPAService,
-        org.apache.oozie.service.StoreService,
-        org.apache.oozie.service.CoordinatorStoreService,
-        org.apache.oozie.service.SLAStoreService,
-        org.apache.oozie.service.DBLiteWorkflowStoreService,
-        org.apache.oozie.service.CallbackService,
-        org.apache.oozie.service.ActionService,
-        org.apache.oozie.service.ActionCheckerService,
-        org.apache.oozie.service.RecoveryService,
-        org.apache.oozie.service.PurgeService,
-        org.apache.oozie.service.CoordinatorEngineService,
-        org.apache.oozie.service.BundleEngineService,
-        org.apache.oozie.service.DagEngineService,
-        org.apache.oozie.service.CoordMaterializeTriggerService,
-        org.apache.oozie.service.StatusTransitService,
-        org.apache.oozie.service.PauseTransitService,
-        org.apache.oozie.service.GroupsService,
-        org.apache.oozie.service.ProxyUserService
-      </value>
-      <description>List of Oozie services</description>
-    </property>
-    <property>
-      <name>oozie.service.URIHandlerService.uri.handlers</name>
-      <value>org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler</value>
-      <description>
-        Enlist the different uri handlers supported for data availability checks.
-      </description>
-    </property>
-    <property>
+  <property>
+    <name>oozie.services</name>
+    <value>
+      org.apache.oozie.service.SchedulerService,
+      org.apache.oozie.service.InstrumentationService,
+      org.apache.oozie.service.CallableQueueService,
+      org.apache.oozie.service.UUIDService,
+      org.apache.oozie.service.ELService,
+      org.apache.oozie.service.AuthorizationService,
+      org.apache.oozie.service.UserGroupInformationService,
+      org.apache.oozie.service.HadoopAccessorService,
+      org.apache.oozie.service.URIHandlerService,
+      org.apache.oozie.service.MemoryLocksService,
+      org.apache.oozie.service.DagXLogInfoService,
+      org.apache.oozie.service.SchemaService,
+      org.apache.oozie.service.LiteWorkflowAppService,
+      org.apache.oozie.service.JPAService,
+      org.apache.oozie.service.StoreService,
+      org.apache.oozie.service.CoordinatorStoreService,
+      org.apache.oozie.service.SLAStoreService,
+      org.apache.oozie.service.DBLiteWorkflowStoreService,
+      org.apache.oozie.service.CallbackService,
+      org.apache.oozie.service.ActionService,
+      org.apache.oozie.service.ActionCheckerService,
+      org.apache.oozie.service.RecoveryService,
+      org.apache.oozie.service.PurgeService,
+      org.apache.oozie.service.CoordinatorEngineService,
+      org.apache.oozie.service.BundleEngineService,
+      org.apache.oozie.service.DagEngineService,
+      org.apache.oozie.service.CoordMaterializeTriggerService,
+      org.apache.oozie.service.StatusTransitService,
+      org.apache.oozie.service.PauseTransitService,
+      org.apache.oozie.service.GroupsService,
+      org.apache.oozie.service.ProxyUserService
+    </value>
+    <description>List of Oozie services</description>
+  </property>
+  <property>
+    <name>oozie.service.URIHandlerService.uri.handlers</name>
+    <value>org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler</value>
+    <description>
+      Enlist the different uri handlers supported for data availability checks.
+    </description>
+  </property>
+  <property>
     <name>oozie.services.ext</name>
-    <value>org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService</value>
+    <value>org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService
+    </value>
     <description>
-       To add/replace services defined in 'oozie.services' with custom implementations.
-       Class names must be separated by commas.
+      To add/replace services defined in 'oozie.services' with custom implementations.
+      Class names must be separated by commas.
     </description>
-    </property>
-    <property>
+  </property>
+  <property>
     <name>oozie.service.coord.push.check.requeue.interval</name>
     <value>30000</value>
     <description>
-        Command re-queue interval for push dependencies (in millisecond).
+      Command re-queue interval for push dependencies (in millisecond).
+    </description>
+  </property>
+  <property>
+    <name>oozie.credentials.credentialclasses</name>
+    <value>hcat=org.apache.oozie.action.hadoop.HCatCredentials</value>
+    <description>
+      Credential Class to be used for HCat.
     </description>
-    </property>
-    <property>
-      <name>oozie.credentials.credentialclasses</name>
-      <value>hcat=org.apache.oozie.action.hadoop.HCatCredentials</value>
-      <description>
-        Credential Class to be used for HCat.
-      </description>
-    </property>
+  </property>
 
 </configuration>
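
For reference, the stack defaults above are standard Hadoop-style configuration XML, so they can be inspected with the ordinary org.apache.hadoop.conf.Configuration API. A minimal sketch, assuming the file is reachable as "oozie-site.xml" on the local path (the filename argument is only an example):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class OozieSiteDefaults {
      public static void main(String[] args) {
        // Load only this file, without pulling in Hadoop's built-in defaults.
        Configuration conf = new Configuration(false);
        conf.addResource(new Path("oozie-site.xml"));   // example path, adjust as needed

        // ${user.name} in oozie.system.id is expanded from system properties.
        System.out.println(conf.get("oozie.system.id"));
        System.out.println(conf.get("oozie.base.url"));
        System.out.println(conf.getInt("oozie.service.coord.normal.default.timeout", -1));
      }
    }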

http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/capacity-scheduler.xml
index 4a19779..695a7ed 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/capacity-scheduler.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/capacity-scheduler.xml
@@ -120,7 +120,10 @@
     <name>yarn.scheduler.capacity.node-locality-delay</name>
     <value>40</value>
     <description>
-      No description
+      Number of missed scheduling opportunities after which the CapacityScheduler
+      attempts to schedule rack-local containers.
+      Typically this should be set to the number of nodes in the cluster. By default it is
+      set to approximately the number of nodes in one rack, which is 40.
     </description>
   </property>
 

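The node-locality-delay description above can be read as a simple counter: the scheduler tolerates a bounded number of missed node-local scheduling opportunities before it relaxes to rack-local placement. A rough sketch of that behavior (illustrative only, not the actual CapacityScheduler code):

    public class LocalityDelaySketch {
      // yarn.scheduler.capacity.node-locality-delay
      static final int NODE_LOCALITY_DELAY = 40;

      static int missedOpportunities = 0;

      // Called once per scheduling opportunity offered to an application.
      static boolean relaxToRackLocal(boolean nodeLocalContainerAvailable) {
        if (nodeLocalContainerAvailable) {
          missedOpportunities = 0;   // got a node-local placement, reset the counter
          return false;
        }
        missedOpportunities++;
        // After roughly one missed opportunity per node in a 40-node rack,
        // give up on node locality and accept a rack-local container.
        return missedOpportunities > NODE_LOCALITY_DELAY;
      }

      public static void main(String[] args) {
        for (int i = 1; i <= 45; i++) {
          System.out.println(i + " -> relax=" + relaxToRackLocal(false));
        }
      }
    }
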
http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-site.xml
index 05e23a9..3c4d24c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-site.xml
@@ -32,6 +32,7 @@
   <property>
     <name>yarn.resourcemanager.resource-tracker.address</name>
     <value>localhost:8025</value>
+    <description> The address of ResourceManager. </description>
   </property>
 
   <property>
@@ -84,11 +85,13 @@
   <property>
     <name>yarn.acl.enable</name>
     <value>true</value>
+    <description> Are acls enabled. </description>
   </property>
 
   <property>
     <name>yarn.admin.acl</name>
     <value>*</value>
+    <description> ACL of who can be admin of the YARN cluster. </description>
   </property>
 
   <!-- NodeManager -->
@@ -144,6 +147,7 @@
   <property>
     <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
     <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+    <description>The auxiliary service class to use </description>
   </property>
 
   <property>
@@ -211,12 +215,13 @@
   <property>
     <name>yarn.log-aggregation-enable</name>
     <value>true</value>
-    <description>Whether to enable log aggregation</description>
+    <description>Whether to enable log aggregation. </description>
   </property>
 
   <property>
     <name>yarn.nodemanager.remote-app-log-dir</name>
     <value>/app-logs</value>
+    <description>Location to aggregate logs to. </description>
   </property>
 
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-site.xml
index bf4af7d..d1e933d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-site.xml
@@ -263,6 +263,9 @@
   <property>
     <name>hbase.security.authentication</name>
     <value>simple</value>
+    <description>  Controls whether or not secure authentication is enabled for HBase. Possible values are 'simple'
+      (no authentication), and 'kerberos'.
+    </description>
   </property>
 
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/core-site.xml
index e244fc7..356cba4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/core-site.xml
@@ -36,6 +36,8 @@
   <property>
     <name>io.serializations</name>
     <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
+    <description> A list of comma-delimited serialization classes that can be used for obtaining serializers and deserializers.
+    </description>
   </property>
 
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hdfs-site.xml
index 61b21f8..296b0c1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hdfs-site.xml
@@ -491,6 +491,10 @@
   <property>
     <name>dfs.domain.socket.path</name>
     <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>
+      This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the TCP port of the DataNode.
+    </description>
   </property>
 
   <property>

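The dfs.domain.socket.path description mentions the optional "_PORT" token. A minimal sketch of the substitution it describes, using a hypothetical templated value (the stack default above contains no token, so nothing would change there):

    public class DomainSocketPathSketch {
      public static void main(String[] args) {
        // Hypothetical templated value; the stack default is /var/lib/hadoop-hdfs/dn_socket.
        String template = "/var/lib/hadoop-hdfs/dn_socket._PORT";
        int dataNodeTcpPort = 50010;   // example DataNode port

        String resolved = template.replace("_PORT", Integer.toString(dataNodeTcpPort));
        System.out.println(resolved);  // /var/lib/hadoop-hdfs/dn_socket.50010
      }
    }
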
http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml
index b3ed5f5..bfdc8ac 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml
@@ -134,16 +134,22 @@ limitations under the License.
   <property>
     <name>hive.server2.enable.doAs</name>
     <value>true</value>
+    <description>Impersonate the connected user. By default HiveServer2 performs the query processing as the user who
+      submitted the query. But if the parameter is set to false, the query will run as the user that the hiveserver2
+      process runs as.
+    </description>
   </property>
 
   <property>
     <name>fs.hdfs.impl.disable.cache</name>
     <value>true</value>
+    <description>Disable HDFS filesystem cache.</description>
   </property>
 
   <property>
     <name>fs.file.impl.disable.cache</name>
     <value>true</value>
+    <description>Disable local filesystem cache.</description>
   </property>
 
   <property>
@@ -167,11 +173,18 @@ limitations under the License.
   <property>
     <name>hive.optimize.bucketmapjoin</name>
     <value>true</value>
+    <description>If the tables being joined are bucketized on the join columns, and the number of buckets in one table
+      is a multiple of the number of buckets in the other table, the buckets can be joined with each other by setting
+      this parameter as true.
+    </description>
   </property>
 
   <property>
     <name>hive.optimize.bucketmapjoin.sortedmerge</name>
     <value>true</value>
+    <description> If the tables being joined are sorted and bucketized on the join columns, and they have the same number
+    of buckets, a sort-merge join can be performed by setting this parameter as true.
+    </description>
   </property>
 
   <property>
@@ -198,6 +211,7 @@ limitations under the License.
   <property>
     <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
     <value>true</value>
+    <description>Required to Enable the conversion of an SMB (Sort-Merge-Bucket) to a map-join SMB.</description>
   </property>
 
   <property>
@@ -249,11 +263,15 @@ limitations under the License.
   <property>
     <name>hive.vectorized.execution.enabled</name>
     <value>false</value>
+    <description>This flag controls the vectorized mode of query execution as documented in HIVE-4160 (as of Hive 0.13.0)
+    </description>
   </property>
 
   <property>
     <name>hive.optimize.reducededuplication</name>
     <value>true</value>
+    <description>Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again.
+    </description>
   </property>
 
   <property>

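The two bucket-join flags documented above (hive.optimize.bucketmapjoin and hive.optimize.bucketmapjoin.sortedmerge) can also be toggled per session rather than cluster-wide. A sketch, assuming a HiveServer2 endpoint on localhost:10000 and the Hive JDBC driver on the classpath:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class BucketJoinSession {
      public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
             Statement stmt = conn.createStatement()) {
          // Session-level equivalents of the hive-site.xml properties above.
          stmt.execute("SET hive.optimize.bucketmapjoin=true");
          stmt.execute("SET hive.optimize.bucketmapjoin.sortedmerge=true");
          // A join on the bucketing/sort columns of two compatibly bucketed
          // tables can now be executed as a (sort-merge) bucket map join.
        }
      }
    }
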
http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/configuration/oozie-site.xml
index bf4533f..c149fa4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/configuration/oozie-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/configuration/oozie-site.xml
@@ -15,299 +15,315 @@
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
--->     
+-->
 
 <configuration>
 
-<!--
-    Refer to the oozie-default.xml file for the complete list of
-    Oozie configuration properties and their default values.
--->
+  <!--
+      Refer to the oozie-default.xml file for the complete list of
+      Oozie configuration properties and their default values.
+  -->
   <property>
     <name>oozie.base.url</name>
     <value>http://localhost:11000/oozie</value>
     <description>Base Oozie URL.</description>
-   </property>
+  </property>
 
   <property>
     <name>oozie.system.id</name>
     <value>oozie-${user.name}</value>
     <description>
-    The Oozie system ID.
+      The Oozie system ID.
     </description>
-   </property>
+  </property>
 
-   <property>
-     <name>oozie.systemmode</name>
-     <value>NORMAL</value>
-     <description>
-     System mode for  Oozie at startup.
-     </description>
-   </property>
+  <property>
+    <name>oozie.systemmode</name>
+    <value>NORMAL</value>
+    <description>
+      System mode for Oozie at startup.
+    </description>
+  </property>
 
-   <property>
-     <name>oozie.service.AuthorizationService.security.enabled</name>
-     <value>true</value>
-     <description>
-     Specifies whether security (user name/admin role) is enabled or not.
-     If disabled any user can manage Oozie system and manage any job.
-     </description>
-   </property>
+  <property>
+    <name>oozie.service.AuthorizationService.security.enabled</name>
+    <value>true</value>
+    <description>
+      Specifies whether security (user name/admin role) is enabled or not.
+      If disabled any user can manage Oozie system and manage any job.
+    </description>
+  </property>
 
-   <property>
-     <name>oozie.service.PurgeService.older.than</name>
-     <value>30</value>
-     <description>
-     Jobs older than this value, in days, will be purged by the PurgeService.
-     </description>
-   </property>
+  <property>
+    <name>oozie.service.PurgeService.older.than</name>
+    <value>30</value>
+    <description>
+      Jobs older than this value, in days, will be purged by the PurgeService.
+    </description>
+  </property>
 
-   <property>
-     <name>oozie.service.PurgeService.purge.interval</name>
-     <value>3600</value>
-     <description>
-     Interval at which the purge service will run, in seconds.
-     </description>
-   </property>
+  <property>
+    <name>oozie.service.PurgeService.purge.interval</name>
+    <value>3600</value>
+    <description>
+      Interval at which the purge service will run, in seconds.
+    </description>
+  </property>
 
-   <property>
-     <name>oozie.service.CallableQueueService.queue.size</name>
-     <value>1000</value>
-     <description>Max callable queue size</description>
-   </property>
+  <property>
+    <name>oozie.service.CallableQueueService.queue.size</name>
+    <value>1000</value>
+    <description>Max callable queue size</description>
+  </property>
 
-   <property>
-     <name>oozie.service.CallableQueueService.threads</name>
-     <value>10</value>
-     <description>Number of threads used for executing callables</description>
-   </property>
+  <property>
+    <name>oozie.service.CallableQueueService.threads</name>
+    <value>10</value>
+    <description>Number of threads used for executing callables</description>
+  </property>
 
-   <property>
-     <name>oozie.service.CallableQueueService.callable.concurrency</name>
-     <value>3</value>
-     <description>
-     Maximum concurrency for a given callable type.
-     Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).
-     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-     All commands that use action executors (action-start, action-end, action-kill and action-check) use
-     the action type as the callable type.
-     </description>
-   </property>
+  <property>
+    <name>oozie.service.CallableQueueService.callable.concurrency</name>
+    <value>3</value>
+    <description>
+      Maximum concurrency for a given callable type.
+      Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).
+      Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
+      All commands that use action executors (action-start, action-end, action-kill and action-check) use
+      the action type as the callable type.
+    </description>
+  </property>
 
-   <property>
-     <name>oozie.service.coord.normal.default.timeout</name>
-     <value>120</value>
-     <description>Default timeout for a coordinator action input check (in minutes) for normal job.
-      -1 means infinite timeout</description>
-   </property>
+  <property>
+    <name>oozie.service.coord.normal.default.timeout</name>
+    <value>120</value>
+    <description>Default timeout for a coordinator action input check (in minutes) for normal job.
+      -1 means infinite timeout
+    </description>
+  </property>
 
-   <property>
-     <name>oozie.db.schema.name</name>
-     <value>oozie</value>
-     <description>
+  <property>
+    <name>oozie.db.schema.name</name>
+    <value>oozie</value>
+    <description>
       Oozie DataBase Name
-     </description>
-   </property>
+    </description>
+  </property>
 
-    <property>
-      <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-      <value> </value>
-      <description>
+  <property>
+    <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
+    <value></value>
+    <description>
       Whitelisted job tracker for Oozie service.
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.authentication.type</name>
-      <value>simple</value>
-      <description>
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-      <value> </value>
-      <description>
-      </description>
-    </property>
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.authentication.type</name>
+    <value>simple</value>
+    <description>
+      Authentication used for Oozie HTTP endpoint, the supported values are: simple | kerberos |
+      #AUTHENTICATION_HANDLER_CLASSNAME#.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
+    <value></value>
+    <description>
+      Whitelisted NameNode for Oozie service.
+    </description>
+  </property>
 
-    <property>
-      <name>oozie.service.WorkflowAppService.system.libpath</name>
-      <value>/user/${user.name}/share/lib</value>
-      <description>
+  <property>
+    <name>oozie.service.WorkflowAppService.system.libpath</name>
+    <value>/user/${user.name}/share/lib</value>
+    <description>
       System library path to use for workflow applications.
       This path is added to workflow application if their job properties sets
       the property 'oozie.use.system.libpath' to true.
-      </description>
-    </property>
+    </description>
+  </property>
 
-    <property>
-      <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-      <value>false</value>
-      <description>
+  <property>
+    <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
+    <value>false</value>
+    <description>
       If set to true, submissions of MapReduce and Pig jobs will include
       automatically the system library path, thus not requiring users to
       specify where the Pig JAR files are. Instead, the ones from the system
       library path are used.
-      </description>
-    </property>
-    <property>
-      <name>oozie.authentication.kerberos.name.rules</name>
-      <value>
-        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
-        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
-        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        DEFAULT
-        </value>
-      <description>The mapping from kerberos principal names to local OS user names.</description>
-    </property>
-    <property>
-      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-      <value>*=/etc/hadoop/conf</value>
-      <description>
-          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-          the relevant Hadoop *-site.xml files. If the path is relative is looked within
-          the Oozie configuration directory; though the path can be absolute (i.e. to point
-          to Hadoop client conf/ directories in the local filesystem.
-      </description>
-    </property>
-    <property>
-        <name>oozie.service.ActionService.executor.ext.classes</name>
-        <value>
-            org.apache.oozie.action.email.EmailActionExecutor,
-            org.apache.oozie.action.hadoop.HiveActionExecutor,
-            org.apache.oozie.action.hadoop.ShellActionExecutor,
-            org.apache.oozie.action.hadoop.SqoopActionExecutor,
-            org.apache.oozie.action.hadoop.DistcpActionExecutor
-        </value>
-    </property>
+    </description>
+  </property>
+  <property>
+    <name>oozie.authentication.kerberos.name.rules</name>
+    <value>
+      RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
+      RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
+      RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+      RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+      DEFAULT
+    </value>
+    <description>The mapping from kerberos principal names to local OS user names.</description>
+  </property>
+  <property>
+    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
+    <value>*=/etc/hadoop/conf</value>
+    <description>
+      Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
+      the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
+      the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
+      the Oozie configuration directory; though the path can be absolute (i.e. to point
+      to Hadoop client conf/ directories in the local filesystem).
+    </description>
+  </property>
+  <property>
+    <name>oozie.service.ActionService.executor.ext.classes</name>
+    <value>
+      org.apache.oozie.action.email.EmailActionExecutor,
+      org.apache.oozie.action.hadoop.HiveActionExecutor,
+      org.apache.oozie.action.hadoop.ShellActionExecutor,
+      org.apache.oozie.action.hadoop.SqoopActionExecutor,
+      org.apache.oozie.action.hadoop.DistcpActionExecutor
+    </value>
+    <description>
+      List of ActionExecutors extension classes (separated by commas). Only action types with associated executors can
+      be used in workflows. This property is a convenience property to add extensions to the built in executors without
+      having to include all the built in ones.
+    </description>
+  </property>
 
-    <property>
-        <name>oozie.service.SchemaService.wf.ext.schemas</name>
-        <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd</value>
-    </property>
-    <property>
-        <name>oozie.service.JPAService.create.db.schema</name>
-        <value>false</value>
-        <description>
-            Creates Oozie DB.
+  <property>
+    <name>oozie.service.SchemaService.wf.ext.schemas</name>
+    <value>
+      shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd
+    </value>
+    <description>
+      Schemas for additional action types. IMPORTANT: if there are no schemas leave a 1 space string, the service
+      trims the value, if empty Configuration assumes it is NULL.
+    </description>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.create.db.schema</name>
+    <value>false</value>
+    <description>
+      Creates Oozie DB.
 
-            If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.
-            If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.
-        </description>
-    </property>
+      If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.
+      If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.
+    </description>
+  </property>
 
-    <property>
-        <name>oozie.service.JPAService.jdbc.driver</name>
-        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-        <description>
-            JDBC driver class.
-        </description>
-    </property>
+  <property>
+    <name>oozie.service.JPAService.jdbc.driver</name>
+    <value>org.apache.derby.jdbc.EmbeddedDriver</value>
+    <description>
+      JDBC driver class.
+    </description>
+  </property>
 
-    <property>
-        <name>oozie.service.JPAService.jdbc.url</name>
-        <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-        <description>
-            JDBC URL.
-        </description>
-    </property>
+  <property>
+    <name>oozie.service.JPAService.jdbc.url</name>
+    <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
+    <description>
+      JDBC URL.
+    </description>
+  </property>
 
-    <property>
-        <name>oozie.service.JPAService.jdbc.username</name>
-        <value>oozie</value>
-        <description>
-          Database user name to use to connect to the database
-        </description>
-    </property>
+  <property>
+    <name>oozie.service.JPAService.jdbc.username</name>
+    <value>oozie</value>
+    <description>
+      Database user name to use to connect to the database
+    </description>
+  </property>
 
-    <property>
-        <name>oozie.service.JPAService.jdbc.password</name>
-        <value> </value>
-        <description>
-            DB user password.
+  <property>
+    <name>oozie.service.JPAService.jdbc.password</name>
+    <value></value>
+    <description>
+      DB user password.
 
-            IMPORTANT: if password is emtpy leave a 1 space string, the service trims the value,
-                       if empty Configuration assumes it is NULL.
-        </description>
-    </property>
+      IMPORTANT: if password is empty leave a 1 space string, the service trims the value,
+      if empty Configuration assumes it is NULL.
+    </description>
+  </property>
 
-    <property>
-        <name>oozie.service.JPAService.pool.max.active.conn</name>
-        <value>10</value>
-        <description>
-             Max number of connections.
-        </description>
-    </property>
+  <property>
+    <name>oozie.service.JPAService.pool.max.active.conn</name>
+    <value>10</value>
+    <description>
+      Max number of connections.
+    </description>
+  </property>
 
-    <property>
-      <name>oozie.services</name>
-      <value>
-        org.apache.oozie.service.SchedulerService,
-        org.apache.oozie.service.InstrumentationService,
-        org.apache.oozie.service.CallableQueueService,
-        org.apache.oozie.service.UUIDService,
-        org.apache.oozie.service.ELService,
-        org.apache.oozie.service.AuthorizationService,
-        org.apache.oozie.service.UserGroupInformationService,
-        org.apache.oozie.service.HadoopAccessorService,
-        org.apache.oozie.service.URIHandlerService,
-        org.apache.oozie.service.MemoryLocksService,
-        org.apache.oozie.service.DagXLogInfoService,
-        org.apache.oozie.service.SchemaService,
-        org.apache.oozie.service.LiteWorkflowAppService,
-        org.apache.oozie.service.JPAService,
-        org.apache.oozie.service.StoreService,
-        org.apache.oozie.service.CoordinatorStoreService,
-        org.apache.oozie.service.SLAStoreService,
-        org.apache.oozie.service.DBLiteWorkflowStoreService,
-        org.apache.oozie.service.CallbackService,
-        org.apache.oozie.service.ActionService,
-        org.apache.oozie.service.ActionCheckerService,
-        org.apache.oozie.service.RecoveryService,
-        org.apache.oozie.service.PurgeService,
-        org.apache.oozie.service.CoordinatorEngineService,
-        org.apache.oozie.service.BundleEngineService,
-        org.apache.oozie.service.DagEngineService,
-        org.apache.oozie.service.CoordMaterializeTriggerService,
-        org.apache.oozie.service.StatusTransitService,
-        org.apache.oozie.service.PauseTransitService,
-        org.apache.oozie.service.GroupsService,
-        org.apache.oozie.service.ProxyUserService
-      </value>
-      <description>List of Oozie services</description>
-    </property>
-    <property>
-      <name>oozie.service.URIHandlerService.uri.handlers</name>
-      <value>org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler</value>
-      <description>
-        Enlist the different uri handlers supported for data availability checks.
-      </description>
-    </property>
-    <property>
+  <property>
+    <name>oozie.services</name>
+    <value>
+      org.apache.oozie.service.SchedulerService,
+      org.apache.oozie.service.InstrumentationService,
+      org.apache.oozie.service.CallableQueueService,
+      org.apache.oozie.service.UUIDService,
+      org.apache.oozie.service.ELService,
+      org.apache.oozie.service.AuthorizationService,
+      org.apache.oozie.service.UserGroupInformationService,
+      org.apache.oozie.service.HadoopAccessorService,
+      org.apache.oozie.service.URIHandlerService,
+      org.apache.oozie.service.MemoryLocksService,
+      org.apache.oozie.service.DagXLogInfoService,
+      org.apache.oozie.service.SchemaService,
+      org.apache.oozie.service.LiteWorkflowAppService,
+      org.apache.oozie.service.JPAService,
+      org.apache.oozie.service.StoreService,
+      org.apache.oozie.service.CoordinatorStoreService,
+      org.apache.oozie.service.SLAStoreService,
+      org.apache.oozie.service.DBLiteWorkflowStoreService,
+      org.apache.oozie.service.CallbackService,
+      org.apache.oozie.service.ActionService,
+      org.apache.oozie.service.ActionCheckerService,
+      org.apache.oozie.service.RecoveryService,
+      org.apache.oozie.service.PurgeService,
+      org.apache.oozie.service.CoordinatorEngineService,
+      org.apache.oozie.service.BundleEngineService,
+      org.apache.oozie.service.DagEngineService,
+      org.apache.oozie.service.CoordMaterializeTriggerService,
+      org.apache.oozie.service.StatusTransitService,
+      org.apache.oozie.service.PauseTransitService,
+      org.apache.oozie.service.GroupsService,
+      org.apache.oozie.service.ProxyUserService
+    </value>
+    <description>List of Oozie services</description>
+  </property>
+  <property>
+    <name>oozie.service.URIHandlerService.uri.handlers</name>
+    <value>org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler</value>
+    <description>
+      Enlist the different uri handlers supported for data availability checks.
+    </description>
+  </property>
+  <property>
     <name>oozie.services.ext</name>
-    <value>org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService</value>
+    <value>org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService
+    </value>
     <description>
-       To add/replace services defined in 'oozie.services' with custom implementations.
-       Class names must be separated by commas.
+      To add/replace services defined in 'oozie.services' with custom implementations.
+      Class names must be separated by commas.
     </description>
-    </property>
-    <property>
+  </property>
+  <property>
     <name>oozie.service.coord.push.check.requeue.interval</name>
     <value>30000</value>
     <description>
-        Command re-queue interval for push dependencies (in millisecond).
+      Command re-queue interval for push dependencies (in millisecond).
+    </description>
+  </property>
+  <property>
+    <name>oozie.credentials.credentialclasses</name>
+    <value>hcat=org.apache.oozie.action.hadoop.HCatCredentials</value>
+    <description>
+      Credential Class to be used for HCat.
     </description>
-    </property>
-    <property>
-      <name>oozie.credentials.credentialclasses</name>
-      <value>hcat=org.apache.oozie.action.hadoop.HCatCredentials</value>
-      <description>
-        Credential Class to be used for HCat.
-      </description>
-    </property>
+  </property>
 
 </configuration>
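
The oozie.authentication.kerberos.name.rules value above uses Hadoop's auth_to_local rule syntax. A hand-rolled illustration of what the first rule does (this is not Hadoop's rule parser; EXAMPLE.COM and "mapred" stand in for the TODO placeholders): for a two-component principal such as jt/host@EXAMPLE.COM, "[2:$1@$0]" builds the string "jt@EXAMPLE.COM", and if that matches the rule's regex, the trailing s/.*/.../ part rewrites it to the local user.

    import java.util.regex.Pattern;

    public class NameRuleSketch {
      public static void main(String[] args) {
        String firstComponent = "jt";                    // $1 from jt/host@EXAMPLE.COM
        String realm = "EXAMPLE.COM";                    // $0, the realm
        String candidate = firstComponent + "@" + realm; // what "[2:$1@$0]" produces

        // Regex from the rule, with the placeholder realm swapped for EXAMPLE.COM.
        if (Pattern.matches("[jt]t@.*EXAMPLE\\.COM", candidate)) {
          System.out.println("mapred");                  // effect of s/.*/mapred/
        }
      }
    }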

http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/capacity-scheduler.xml
index 4a19779..695a7ed 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/capacity-scheduler.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/capacity-scheduler.xml
@@ -120,7 +120,10 @@
     <name>yarn.scheduler.capacity.node-locality-delay</name>
     <value>40</value>
     <description>
-      No description
+      Number of missed scheduling opportunities after which the CapacityScheduler
+      attempts to schedule rack-local containers.
+      Typically this should be set to the number of nodes in the cluster. By default it is
+      set to approximately the number of nodes in one rack, which is 40.
     </description>
   </property>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-queue-acls.xml
index ce12380..9389ed0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-queue-acls.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-queue-acls.xml
@@ -27,11 +27,24 @@
   <property>
     <name>mapred.queue.default.acl-submit-job</name>
     <value>*</value>
+    <description> Comma separated list of user and group names that are allowed
+      to submit jobs to the 'default' queue. The user list and the group list
+      are separated by a blank. For e.g. alice,bob group1,group2.
+      If set to the special value '*', it means all users are allowed to
+      submit jobs.
+    </description>
   </property>
 
   <property>
     <name>mapred.queue.default.acl-administer-jobs</name>
     <value>*</value>
+    <description> Comma separated list of user and group names that are allowed
+      to delete jobs or modify job's priority for jobs not owned by the current
+      user in the 'default' queue. The user list and the group list
+      are separated by a blank. For e.g. alice,bob group1,group2.
+      If set to the special value '*', it means all users are allowed to do
+      this operation.
+    </description>
   </property>
 
   <!-- END ACLs -->
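
The ACL format described above is "userlist grouplist": two comma-separated lists separated by a single blank, with '*' meaning everyone. A small illustrative parser (not Hadoop's AccessControlList implementation):

    public class QueueAclFormatSketch {
      public static void main(String[] args) {
        String acl = "alice,bob group1,group2";   // example value from the description

        if (acl.trim().equals("*")) {
          System.out.println("all users and groups allowed");
          return;
        }
        String[] parts = acl.trim().split(" ", 2);          // users, then groups
        String[] users = parts[0].isEmpty() ? new String[0] : parts[0].split(",");
        String[] groups = parts.length > 1 ? parts[1].split(",") : new String[0];

        System.out.println("users:  " + String.join(",", users));
        System.out.println("groups: " + String.join(",", groups));
      }
    }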

http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-site.xml
index 424d216..0d22a65 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-site.xml
@@ -151,6 +151,9 @@
   <property>
     <name>mapreduce.map.output.compress</name>
     <value>false</value>
+    <description>
+      Should the outputs of the maps be compressed before being sent across the network. Uses SequenceFile compression.
+    </description>
   </property>
 
   <property>
@@ -296,11 +299,13 @@
   <property>
     <name>mapreduce.admin.map.child.java.opts</name>
     <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <description>This property stores Java options for map tasks.</description>
   </property>
 
   <property>
     <name>mapreduce.admin.reduce.child.java.opts</name>
     <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <description>This property stores Java options for reduce tasks.</description>
   </property>
 
   <property>

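As a sketch of how the compression switch above is commonly used (the codec property and Snappy codec are standard Hadoop classes, not part of this commit), enabling map output compression usually also names a codec:

  <property>
    <name>mapreduce.map.output.compress</name>
    <value>true</value>
  </property>
  <property>
    <!-- assumes the Snappy native libraries are available on the cluster -->
    <name>mapreduce.map.output.compress.codec</name>
    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
  </property>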
http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-site.xml
index 7d4d4fb..9bfd9ee 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-site.xml
@@ -32,6 +32,7 @@
   <property>
     <name>yarn.resourcemanager.resource-tracker.address</name>
     <value>localhost:8025</value>
+    <description>The address of the ResourceManager's resource tracker, used by the NodeManagers.</description>
   </property>
 
   <property>
@@ -84,11 +85,13 @@
   <property>
     <name>yarn.acl.enable</name>
     <value>true</value>
+    <description>Whether ACLs are enabled.</description>
   </property>
 
   <property>
     <name>yarn.admin.acl</name>
     <value>*</value>
+    <description>ACL that determines who can be an administrator of the YARN cluster.</description>
   </property>
 
   <!-- NodeManager -->
@@ -144,6 +147,7 @@
   <property>
     <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
     <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+    <description>The auxiliary service class to use for the MapReduce shuffle.</description>
   </property>
 
   <property>
@@ -211,12 +215,13 @@
   <property>
     <name>yarn.log-aggregation-enable</name>
     <value>true</value>
-    <description>Whether to enable log aggregation</description>
+    <description>Whether to enable log aggregation. </description>
   </property>
 
   <property>
     <name>yarn.nodemanager.remote-app-log-dir</name>
     <value>/app-logs</value>
+    <description>Location to aggregate logs to. </description>
   </property>
 
   <property>


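As a sketch of how the two log-aggregation settings documented above work together (the retention property is a standard YARN setting, shown here with an illustrative value rather than one taken from this commit):

  <property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
  </property>
  <property>
    <!-- HDFS directory the aggregated container logs are written to -->
    <name>yarn.nodemanager.remote-app-log-dir</name>
    <value>/app-logs</value>
  </property>
  <property>
    <!-- illustrative retention period: 30 days, in seconds -->
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>2592000</value>
  </property>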
[2/3] git commit: AMBARI-4448. Some of the configuration parameters do not have description. (jaimin via yusaku)

Posted by yu...@apache.org.
AMBARI-4448. Some of the configuration parameters do not have description. (jaimin via yusaku)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8017fec5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8017fec5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8017fec5

Branch: refs/heads/trunk
Commit: 8017fec53a522098a7c63b39a9187636dc3fd533
Parents: 9eb89a2
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Tue Jan 28 19:28:26 2014 -0800
Committer: Yusaku Sako <yu...@hortonworks.com>
Committed: Tue Jan 28 19:32:06 2014 -0800

----------------------------------------------------------------------
 .../services/HBASE/configuration/hbase-site.xml |   3 +
 .../services/HDFS/configuration/core-site.xml   |   2 +
 .../services/HIVE/configuration/hive-site.xml   |  14 +
 .../configuration/mapred-queue-acls.xml         |  13 +
 .../MAPREDUCE/configuration/mapred-site.xml     |  40 +-
 .../services/OOZIE/configuration/oozie-site.xml | 408 +++++++--------
 .../services/HBASE/configuration/hbase-site.xml |   3 +
 .../services/HDFS/configuration/core-site.xml   |   2 +
 .../services/HIVE/configuration/hive-site.xml   |  14 +
 .../configuration/mapred-queue-acls.xml         |  13 +
 .../MAPREDUCE/configuration/mapred-site.xml     |  40 +-
 .../services/OOZIE/configuration/oozie-site.xml | 408 +++++++--------
 .../services/HBASE/configuration/hbase-site.xml |   3 +
 .../services/HDFS/configuration/core-site.xml   |   2 +
 .../services/HDFS/configuration/hdfs-site.xml   |   4 +
 .../services/HIVE/configuration/hive-site.xml   |  18 +
 .../configuration/mapred-queue-acls.xml         |  13 +
 .../MAPREDUCE2/configuration/mapred-site.xml    |   5 +
 .../services/OOZIE/configuration/oozie-site.xml | 504 ++++++++++---------
 .../YARN/configuration/capacity-scheduler.xml   |   5 +-
 .../services/YARN/configuration/yarn-site.xml   |   7 +-
 .../services/HBASE/configuration/hbase-site.xml |   3 +
 .../services/HDFS/configuration/core-site.xml   |   2 +
 .../services/HDFS/configuration/hdfs-site.xml   |   4 +
 .../services/HIVE/configuration/hive-site.xml   |  18 +
 .../services/OOZIE/configuration/oozie-site.xml | 504 ++++++++++---------
 .../YARN/configuration/capacity-scheduler.xml   |   5 +-
 .../YARN/configuration/mapred-queue-acls.xml    |  13 +
 .../services/YARN/configuration/mapred-site.xml |   5 +
 .../services/YARN/configuration/yarn-site.xml   |   7 +-
 30 files changed, 1180 insertions(+), 902 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-site.xml
index 68904a1..bd4d61f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-site.xml
@@ -255,6 +255,9 @@
   <property>
     <name>hbase.security.authentication</name>
     <value>simple</value>
+    <description>  Controls whether or not secure authentication is enabled for HBase. Possible values are 'simple'
+      (no authentication), and 'kerberos'.
+    </description>
   </property>
 
   <property>

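As a sketch of the 'kerberos' alternative mentioned in the new description (hbase.security.authorization is a standard HBase property, not part of this commit, and is commonly enabled alongside it):

  <property>
    <name>hbase.security.authentication</name>
    <value>kerberos</value>
  </property>
  <property>
    <!-- commonly enabled together with kerberos authentication -->
    <name>hbase.security.authorization</name>
    <value>true</value>
  </property>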
http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/core-site.xml
index 8c43295..d2bff0f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/core-site.xml
@@ -36,6 +36,8 @@
   <property>
     <name>io.serializations</name>
     <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
+    <description> A list of comma-delimited serialization classes that can be used for obtaining serializers and deserializers.
+    </description>
   </property>
 
   <property>

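Since io.serializations is a comma-delimited class list, extending it is a matter of appending another serializer class; a sketch using Hadoop's built-in JavaSerialization (not part of this commit):

  <property>
    <name>io.serializations</name>
    <value>org.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.JavaSerialization</value>
  </property>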
http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-site.xml
index 29ed54e..e5b8bf4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-site.xml
@@ -121,16 +121,22 @@ limitations under the License.
   <property>
     <name>hive.server2.enable.doAs</name>
     <value>true</value>
+    <description>Impersonate the connected user. By default HiveServer2 performs the query processing as the user who
+      submitted the query. But if the parameter is set to false, the query will run as the user that the hiveserver2
+      process runs as.
+    </description>
   </property>
 
   <property>
     <name>fs.hdfs.impl.disable.cache</name>
     <value>true</value>
+    <description>Disable HDFS filesystem cache.</description>
   </property>
 
   <property>
     <name>fs.file.impl.disable.cache</name>
     <value>true</value>
+    <description>Disable local filesystem cache.</description>
   </property>
 
   <property>
@@ -154,11 +160,18 @@ limitations under the License.
   <property>
     <name>hive.optimize.bucketmapjoin</name>
     <value>true</value>
+    <description>If the tables being joined are bucketized on the join columns, and the number of buckets in one table
+      is a multiple of the number of buckets in the other table, the buckets can be joined with each other by setting
+      this parameter as true.
+    </description>
   </property>
 
   <property>
     <name>hive.optimize.bucketmapjoin.sortedmerge</name>
     <value>true</value>
+    <description> If the tables being joined are sorted and bucketized on the join columns, and they have the same number
+      of buckets, a sort-merge join can be performed by setting this parameter as true.
+    </description>
   </property>
 
   <property>
@@ -185,6 +198,7 @@ limitations under the License.
   <property>
     <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
     <value>true</value>
+    <description>Required to enable the conversion of an SMB (Sort-Merge-Bucket) join to a map-join SMB.</description>
   </property>
 
   <property>

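As a sketch of how the bucket-map-join descriptions above fit together: the noconditionaltask flag is typically set alongside the base sort-merge-join conversion flag (hive.auto.convert.sortmerge.join is a standard Hive property, not part of this commit):

  <property>
    <name>hive.auto.convert.sortmerge.join</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
    <value>true</value>
  </property>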
http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-queue-acls.xml
index ce12380..9389ed0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-queue-acls.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-queue-acls.xml
@@ -27,11 +27,24 @@
   <property>
     <name>mapred.queue.default.acl-submit-job</name>
     <value>*</value>
+    <description> Comma separated list of user and group names that are allowed
+      to submit jobs to the 'default' queue. The user list and the group list
+      are separated by a blank. For example: alice,bob group1,group2.
+      If set to the special value '*', it means all users are allowed to
+      submit jobs.
+    </description>
   </property>
 
   <property>
     <name>mapred.queue.default.acl-administer-jobs</name>
     <value>*</value>
+    <description> Comma separated list of user and group names that are allowed
+      to delete jobs or modify job's priority for jobs not owned by the current
+      user in the 'default' queue. The user list and the group list
+      are separated by a blank. For example: alice,bob group1,group2.
+      If set to the special value '*', it means all users are allowed to do
+      this operation.
+    </description>
   </property>
 
   <!-- END ACLs -->

http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml
index 1db37a8..bb54a93 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml
@@ -35,7 +35,9 @@
   <property>
     <name>io.sort.record.percent</name>
     <value>.2</value>
-    <description>No description</description>
+    <description>The percentage of io.sort.mb dedicated to tracking record boundaries. Let this value be r, io.sort.mb be x.
+      The maximum number of records collected before the collection thread must block is equal to (r * x) / 4
+    </description>
   </property>
 
   <property>
@@ -47,7 +49,8 @@
   <property>
     <name>io.sort.factor</name>
     <value>100</value>
-    <description>No description</description>
+    <description>The number of streams to merge at once while sorting files. This determines the number of open file handles.
+    </description>
   </property>
 
   <!-- map/reduce properties -->
@@ -99,36 +102,44 @@
     <!-- cluster specific -->
     <name>mapred.local.dir</name>
     <value>/hadoop/mapred</value>
-    <description>No description</description>
+    <description>The local directory where MapReduce stores intermediate data files. May be a comma-separated list of
+      directories on different devices in order to spread disk i/o. Directories that do not exist are ignored.
+    </description>
     <final>true</final>
   </property>
 
   <property>
     <name>mapreduce.cluster.administrators</name>
     <value> hadoop</value>
+    <description>Cluster administrators. Irrespective of the job ACLs configured, cluster administrators always have
+      access to view and modify a job.
+    </description>
   </property>
 
   <property>
     <name>mapred.reduce.parallel.copies</name>
     <value>30</value>
-    <description>No description</description>
+    <description>The default number of parallel transfers run by reduce
+      during the copy(shuffle) phase.
+    </description>
   </property>
 
   <property>
     <name>mapred.tasktracker.map.tasks.maximum</name>
     <value>4</value>
-    <description>No description</description>
+    <description>The maximum number of map tasks that will be run simultaneously by a task tracker.</description>
   </property>
 
   <property>
     <name>mapred.tasktracker.reduce.tasks.maximum</name>
     <value>2</value>
-    <description>No description</description>
+    <description>The maximum number of reduce tasks that will be run simultaneously by a task tracker.</description>
   </property>
 
   <property>
     <name>tasktracker.http.threads</name>
     <value>50</value>
+    <description>The number of worker threads for the http server. This is used for map output fetching.</description>
   </property>
 
   <property>
@@ -148,6 +159,7 @@
   <property>
     <name>mapred.reduce.slowstart.completed.maps</name>
     <value>0.05</value>
+    <description>Fraction of the number of maps in the job which should be complete before reduces are scheduled for the job.</description>
   </property>
 
   <property>
@@ -199,11 +211,13 @@
   <property>
     <name>mapred.jobtracker.completeuserjobs.maximum</name>
     <value>0</value>
+    <description>The maximum number of complete jobs per user to keep around before delegating them to the job history.</description>
   </property>
 
   <property>
     <name>mapred.jobtracker.taskScheduler</name>
     <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
+    <description>The class responsible for scheduling the tasks.</description>
   </property>
 
   <property>
@@ -253,7 +267,7 @@
   <property>
     <name>jetty.connector</name>
     <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
-    <description>No description</description>
+    <description>The connector to be used by Jetty server.</description>
   </property>
 
   <property>
@@ -267,12 +281,12 @@
   <property>
     <name>mapred.child.root.logger</name>
     <value>INFO,TLA</value>
+    <description>Logger configuration for the TaskTracker child processes</description>
   </property>
 
   <property>
     <name>ambari.mapred.child.java.opts.memory</name>
     <value>768</value>
-
     <description>Java options Memory for the TaskTracker child processes</description>
   </property>
 
@@ -369,11 +383,13 @@
   <property>
     <name>mapred.healthChecker.interval</name>
     <value>135000</value>
+    <description>Frequency of the node health script to be run, in milliseconds</description>
   </property>
 
   <property>
     <name>mapred.healthChecker.script.timeout</name>
     <value>60000</value>
+    <description>Time after which the node health script will be killed if unresponsive, and the script will be considered to have failed.</description>
   </property>
 
   <property>
@@ -406,17 +422,19 @@
   <property>
     <name>mapred.jobtracker.retirejob.check</name>
     <value>10000</value>
+    <description>Interval for the check for jobs to be retired. </description>
   </property>
 
   <property>
     <name>mapred.jobtracker.retirejob.interval</name>
     <value>21600000</value>
+    <description>Completed job retirement interval.</description>
   </property>
 
   <property>
     <name>mapred.job.tracker.history.completed.location</name>
     <value>/mapred/history/done</value>
-    <description>No description</description>
+    <description>The completed job history files are stored at this single well known location.</description>
   </property>
 
   <property>
@@ -437,6 +455,7 @@
   <property>
     <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
     <value>false</value>
+    <description>Enable this flag to create a _SUCCESS file for each successful job.</description>
   </property>
 
   <property>
@@ -476,6 +495,9 @@
   <property>
     <name>hadoop.job.history.user.location</name>
     <value>none</value>
+    <description>Location to store the history files of a particular job. If set to none, then the job histories are
+      not collected anywhere outside the master node.
+    </description>
     <final>true</final>
   </property>
 

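A quick worked example of the io.sort.record.percent formula quoted above, assuming x is io.sort.mb expressed in bytes: with io.sort.mb = 200 and the default r = 0.2, (0.2 * 200 * 2^20) / 4 is roughly 10.5 million record boundaries before the collector must block. A hypothetical override for jobs that emit many small records:

  <property>
    <!-- hypothetical tuning: reserve a larger share of io.sort.mb for record tracking -->
    <name>io.sort.record.percent</name>
    <value>.3</value>
  </property>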
http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-site.xml
index 57239c3..d8bb062 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-site.xml
@@ -15,223 +15,239 @@
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
--->     
+-->
 
 <configuration>
 
-<!--
-    Refer to the oozie-default.xml file for the complete list of
-    Oozie configuration properties and their default values.
--->
+  <!--
+      Refer to the oozie-default.xml file for the complete list of
+      Oozie configuration properties and their default values.
+  -->
   <property>
     <name>oozie.base.url</name>
     <value>http://localhost:11000/oozie</value>
     <description>Base Oozie URL.</description>
-   </property>
+  </property>
 
   <property>
     <name>oozie.system.id</name>
     <value>oozie-${user.name}</value>
     <description>
-    The Oozie system ID.
-    </description>
-   </property>
-
-   <property>
-     <name>oozie.systemmode</name>
-     <value>NORMAL</value>
-     <description>
-     System mode for  Oozie at startup.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.AuthorizationService.authorization.enabled</name>
-     <value>true</value>
-     <description>
-     Specifies whether security (user name/admin role) is enabled or not.
-     If disabled any user can manage Oozie system and manage any job.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.older.than</name>
-     <value>30</value>
-     <description>
-     Jobs older than this value, in days, will be purged by the PurgeService.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.purge.interval</name>
-     <value>3600</value>
-     <description>
-     Interval at which the purge service will run, in seconds.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.queue.size</name>
-     <value>1000</value>
-     <description>Max callable queue size</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.threads</name>
-     <value>10</value>
-     <description>Number of threads used for executing callables</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.callable.concurrency</name>
-     <value>3</value>
-     <description>
-     Maximum concurrency for a given callable type.
-     Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).
-     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-     All commands that use action executors (action-start, action-end, action-kill and action-check) use
-     the action type as the callable type.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.coord.normal.default.timeout</name>
-     <value>120</value>
-     <description>Default timeout for a coordinator action input check (in minutes) for normal job.
-      -1 means infinite timeout</description>
-   </property>
-
-   <property>
-     <name>oozie.db.schema.name</name>
-     <value>oozie</value>
-     <description>
+      The Oozie system ID.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.systemmode</name>
+    <value>NORMAL</value>
+    <description>
+      System mode for Oozie at startup.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.AuthorizationService.authorization.enabled</name>
+    <value>true</value>
+    <description>
+      Specifies whether security (user name/admin role) is enabled or not.
+      If disabled any user can manage Oozie system and manage any job.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.PurgeService.older.than</name>
+    <value>30</value>
+    <description>
+      Jobs older than this value, in days, will be purged by the PurgeService.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.PurgeService.purge.interval</name>
+    <value>3600</value>
+    <description>
+      Interval at which the purge service will run, in seconds.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.CallableQueueService.queue.size</name>
+    <value>1000</value>
+    <description>Max callable queue size</description>
+  </property>
+
+  <property>
+    <name>oozie.service.CallableQueueService.threads</name>
+    <value>10</value>
+    <description>Number of threads used for executing callables</description>
+  </property>
+
+  <property>
+    <name>oozie.service.CallableQueueService.callable.concurrency</name>
+    <value>3</value>
+    <description>
+      Maximum concurrency for a given callable type.
+      Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).
+      Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
+      All commands that use action executors (action-start, action-end, action-kill and action-check) use
+      the action type as the callable type.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.coord.normal.default.timeout</name>
+    <value>120</value>
+    <description>Default timeout for a coordinator action input check (in minutes) for normal job.
+      -1 means infinite timeout
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.db.schema.name</name>
+    <value>oozie</value>
+    <description>
       Oozie DataBase Name
-     </description>
-   </property>
+    </description>
+  </property>
 
-    <property>
-      <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-      <value> </value>
-      <description>
+  <property>
+    <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
+    <value></value>
+    <description>
       Whitelisted job tracker for Oozie service.
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.authentication.type</name>
-      <value>simple</value>
-      <description>
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-      <value> </value>
-      <description>
-      </description>
-    </property>
-
-    <property>
-      <name>oozie.service.WorkflowAppService.system.libpath</name>
-      <value>/user/${user.name}/share/lib</value>
-      <description>
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.authentication.type</name>
+    <value>simple</value>
+    <description>
+      Authentication type used for the Oozie HTTP endpoint. The supported values are: simple | kerberos |
+      #AUTHENTICATION_HANDLER_CLASSNAME#.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
+    <value></value>
+    <description>
+      Whitelisted NameNode for Oozie service.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.WorkflowAppService.system.libpath</name>
+    <value>/user/${user.name}/share/lib</value>
+    <description>
       System library path to use for workflow applications.
       This path is added to workflow application if their job properties sets
       the property 'oozie.use.system.libpath' to true.
-      </description>
-    </property>
+    </description>
+  </property>
 
-    <property>
-      <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-      <value>false</value>
-      <description>
+  <property>
+    <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
+    <value>false</value>
+    <description>
       If set to true, submissions of MapReduce and Pig jobs will include
       automatically the system library path, thus not requiring users to
       specify where the Pig JAR files are. Instead, the ones from the system
       library path are used.
-      </description>
-    </property>
-    <property>
-      <name>oozie.authentication.kerberos.name.rules</name>
-      <value>DEFAULT</value>
-      <description>The mapping from kerberos principal names to local OS user names.</description>
-    </property>
-    <property>
-      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-      <value>*=/etc/hadoop/conf</value>
-      <description>
-          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-          the relevant Hadoop *-site.xml files. If the path is relative is looked within
-          the Oozie configuration directory; though the path can be absolute (i.e. to point
-          to Hadoop client conf/ directories in the local filesystem.
-      </description>
-    </property>
-    <property>
-        <name>oozie.service.ActionService.executor.ext.classes</name>
-        <value>org.apache.oozie.action.email.EmailActionExecutor,
-org.apache.oozie.action.hadoop.HiveActionExecutor,
-org.apache.oozie.action.hadoop.ShellActionExecutor,
-org.apache.oozie.action.hadoop.SqoopActionExecutor,
-org.apache.oozie.action.hadoop.DistcpActionExecutor</value>
-    </property>
-
-    <property>
-        <name>oozie.service.SchemaService.wf.ext.schemas</name>
-        <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd</value>
-    </property>
-    <property>
-        <name>oozie.service.JPAService.create.db.schema</name>
-        <value>false</value>
-        <description>
-            Creates Oozie DB.
-
-            If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.
-            If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.driver</name>
-        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-        <description>
-            JDBC driver class.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.url</name>
-        <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-        <description>
-            JDBC URL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.username</name>
-        <value>oozie</value>
-        <description>
-            DB user name.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.password</name>
-        <value> </value>
-        <description>
-            DB user password.
-
-            IMPORTANT: if password is emtpy leave a 1 space string, the service trims the value,
-                       if empty Configuration assumes it is NULL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.pool.max.active.conn</name>
-        <value>10</value>
-        <description>
-             Max number of connections.
-        </description>
-    </property>
+    </description>
+  </property>
+  <property>
+    <name>oozie.authentication.kerberos.name.rules</name>
+    <value>DEFAULT</value>
+    <description>The mapping from kerberos principal names to local OS user names.</description>
+  </property>
+  <property>
+    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
+    <value>*=/etc/hadoop/conf</value>
+    <description>
+      Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
+      the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
+      the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
+      the Oozie configuration directory; though the path can be absolute (i.e. pointing
+      to Hadoop client conf/ directories in the local filesystem).
+    </description>
+  </property>
+  <property>
+    <name>oozie.service.ActionService.executor.ext.classes</name>
+    <value>org.apache.oozie.action.email.EmailActionExecutor,
+      org.apache.oozie.action.hadoop.HiveActionExecutor,
+      org.apache.oozie.action.hadoop.ShellActionExecutor,
+      org.apache.oozie.action.hadoop.SqoopActionExecutor,
+      org.apache.oozie.action.hadoop.DistcpActionExecutor
+    </value>
+    <description>
+      List of ActionExecutors extension classes (separated by commas). Only action types with associated executors can
+      be used in workflows. This property is a convenience property to add extensions to the built in executors without
+      having to include all the built in ones.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.SchemaService.wf.ext.schemas</name>
+    <value>
+      shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd
+    </value>
+    <description>
+      Schemas for additional action types. IMPORTANT: if there are no schemas, leave a 1 space string; the service
+      trims the value, if empty Configuration assumes it is NULL.
+    </description>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.create.db.schema</name>
+    <value>false</value>
+    <description>
+      Creates Oozie DB.
+
+      If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.
+      If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.jdbc.driver</name>
+    <value>org.apache.derby.jdbc.EmbeddedDriver</value>
+    <description>
+      JDBC driver class.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.jdbc.url</name>
+    <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
+    <description>
+      JDBC URL.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.jdbc.username</name>
+    <value>oozie</value>
+    <description>
+      DB user name.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.jdbc.password</name>
+    <value></value>
+    <description>
+      DB user password.
+
+      IMPORTANT: if the password is empty, leave a 1 space string; the service trims the value,
+      if empty Configuration assumes it is NULL.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.pool.max.active.conn</name>
+    <value>10</value>
+    <description>
+      Max number of connections.
+    </description>
+  </property>
 </configuration>
\ No newline at end of file

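The JPAService properties above default to the embedded Derby database; pointing Oozie at an external database mainly means swapping the driver, URL, and credentials. A minimal sketch for a hypothetical MySQL host (host and database name are placeholders, not values from this commit):

  <property>
    <name>oozie.service.JPAService.jdbc.driver</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <property>
    <!-- hypothetical host and database name, shown only for illustration -->
    <name>oozie.service.JPAService.jdbc.url</name>
    <value>jdbc:mysql://db.example.com:3306/oozie</value>
  </property>
  <property>
    <name>oozie.service.JPAService.jdbc.username</name>
    <value>oozie</value>
  </property>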
http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-site.xml
index 68904a1..bd4d61f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-site.xml
@@ -255,6 +255,9 @@
   <property>
     <name>hbase.security.authentication</name>
     <value>simple</value>
+    <description>  Controls whether or not secure authentication is enabled for HBase. Possible values are 'simple'
+      (no authentication), and 'kerberos'.
+    </description>
   </property>
 
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/core-site.xml
index 8c43295..d2bff0f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/core-site.xml
@@ -36,6 +36,8 @@
   <property>
     <name>io.serializations</name>
     <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
+    <description> A list of comma-delimited serialization classes that can be used for obtaining serializers and deserializers.
+    </description>
   </property>
 
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml
index 29ed54e..e5b8bf4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml
@@ -121,16 +121,22 @@ limitations under the License.
   <property>
     <name>hive.server2.enable.doAs</name>
     <value>true</value>
+    <description>Impersonate the connected user. By default HiveServer2 performs the query processing as the user who
+      submitted the query. But if the parameter is set to false, the query will run as the user that the hiveserver2
+      process runs as.
+    </description>
   </property>
 
   <property>
     <name>fs.hdfs.impl.disable.cache</name>
     <value>true</value>
+    <description>Disable HDFS filesystem cache.</description>
   </property>
 
   <property>
     <name>fs.file.impl.disable.cache</name>
     <value>true</value>
+    <description>Disable local filesystem cache.</description>
   </property>
 
   <property>
@@ -154,11 +160,18 @@ limitations under the License.
   <property>
     <name>hive.optimize.bucketmapjoin</name>
     <value>true</value>
+    <description>If the tables being joined are bucketized on the join columns, and the number of buckets in one table
+      is a multiple of the number of buckets in the other table, the buckets can be joined with each other by setting
+      this parameter as true.
+    </description>
   </property>
 
   <property>
     <name>hive.optimize.bucketmapjoin.sortedmerge</name>
     <value>true</value>
+    <description> If the tables being joined are sorted and bucketized on the join columns, and they have the same number
+      of buckets, a sort-merge join can be performed by setting this parameter as true.
+    </description>
   </property>
 
   <property>
@@ -185,6 +198,7 @@ limitations under the License.
   <property>
     <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
     <value>true</value>
+    <description>Required to enable the conversion of an SMB (Sort-Merge-Bucket) join to a map-join SMB.</description>
   </property>
 
   <property>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-queue-acls.xml
index ce12380..9389ed0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-queue-acls.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-queue-acls.xml
@@ -27,11 +27,24 @@
   <property>
     <name>mapred.queue.default.acl-submit-job</name>
     <value>*</value>
+    <description> Comma separated list of user and group names that are allowed
+      to submit jobs to the 'default' queue. The user list and the group list
+      are separated by a blank. For example: alice,bob group1,group2.
+      If set to the special value '*', it means all users are allowed to
+      submit jobs.
+    </description>
   </property>
 
   <property>
     <name>mapred.queue.default.acl-administer-jobs</name>
     <value>*</value>
+    <description> Comma separated list of user and group names that are allowed
+      to delete jobs or modify job's priority for jobs not owned by the current
+      user in the 'default' queue. The user list and the group list
+      are separated by a blank. For example: alice,bob group1,group2.
+      If set to the special value '*', it means all users are allowed to do
+      this operation.
+    </description>
   </property>
 
   <!-- END ACLs -->

http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
index 1db37a8..bb54a93 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
@@ -35,7 +35,9 @@
   <property>
     <name>io.sort.record.percent</name>
     <value>.2</value>
-    <description>No description</description>
+    <description>The percentage of io.sort.mb dedicated to tracking record boundaries. Let this value be r, io.sort.mb be x.
+      The maximum number of records collected before the collection thread must block is equal to (r * x) / 4
+    </description>
   </property>
 
   <property>
@@ -47,7 +49,8 @@
   <property>
     <name>io.sort.factor</name>
     <value>100</value>
-    <description>No description</description>
+    <description>The number of streams to merge at once while sorting files. This determines the number of open file handles.
+    </description>
   </property>
 
   <!-- map/reduce properties -->
@@ -99,36 +102,44 @@
     <!-- cluster specific -->
     <name>mapred.local.dir</name>
     <value>/hadoop/mapred</value>
-    <description>No description</description>
+    <description>The local directory where MapReduce stores intermediate data files. May be a comma-separated list of
+      directories on different devices in order to spread disk i/o. Directories that do not exist are ignored.
+    </description>
     <final>true</final>
   </property>
 
   <property>
     <name>mapreduce.cluster.administrators</name>
     <value> hadoop</value>
+    <description>Cluster administrators. Irrespective of the job ACLs configured, cluster administrators always have
+      access to view and modify a job.
+    </description>
   </property>
 
   <property>
     <name>mapred.reduce.parallel.copies</name>
     <value>30</value>
-    <description>No description</description>
+    <description>The default number of parallel transfers run by reduce
+      during the copy(shuffle) phase.
+    </description>
   </property>
 
   <property>
     <name>mapred.tasktracker.map.tasks.maximum</name>
     <value>4</value>
-    <description>No description</description>
+    <description>The maximum number of map tasks that will be run simultaneously by a task tracker.</description>
   </property>
 
   <property>
     <name>mapred.tasktracker.reduce.tasks.maximum</name>
     <value>2</value>
-    <description>No description</description>
+    <description>The maximum number of reduce tasks that will be run simultaneously by a task tracker.</description>
   </property>
 
   <property>
     <name>tasktracker.http.threads</name>
     <value>50</value>
+    <description>The number of worker threads for the http server. This is used for map output fetching.</description>
   </property>
 
   <property>
@@ -148,6 +159,7 @@
   <property>
     <name>mapred.reduce.slowstart.completed.maps</name>
     <value>0.05</value>
+    <description>Fraction of the number of maps in the job which should be complete before reduces are scheduled for the job.</description>
   </property>
 
   <property>
@@ -199,11 +211,13 @@
   <property>
     <name>mapred.jobtracker.completeuserjobs.maximum</name>
     <value>0</value>
+    <description>The maximum number of complete jobs per user to keep around before delegating them to the job history.</description>
   </property>
 
   <property>
     <name>mapred.jobtracker.taskScheduler</name>
     <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
+    <description>The class responsible for scheduling the tasks.</description>
   </property>
 
   <property>
@@ -253,7 +267,7 @@
   <property>
     <name>jetty.connector</name>
     <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
-    <description>No description</description>
+    <description>The connector to be used by Jetty server.</description>
   </property>
 
   <property>
@@ -267,12 +281,12 @@
   <property>
     <name>mapred.child.root.logger</name>
     <value>INFO,TLA</value>
+    <description>Logger configuration for the TaskTracker child processes</description>
   </property>
 
   <property>
     <name>ambari.mapred.child.java.opts.memory</name>
     <value>768</value>
-
     <description>Java options Memory for the TaskTracker child processes</description>
   </property>
 
@@ -369,11 +383,13 @@
   <property>
     <name>mapred.healthChecker.interval</name>
     <value>135000</value>
+    <description>Frequency of the node health script to be run, in milliseconds</description>
   </property>
 
   <property>
     <name>mapred.healthChecker.script.timeout</name>
     <value>60000</value>
+    <description>Time after which the node health script will be killed if unresponsive, and the script will be considered to have failed.</description>
   </property>
 
   <property>
@@ -406,17 +422,19 @@
   <property>
     <name>mapred.jobtracker.retirejob.check</name>
     <value>10000</value>
+    <description>Interval for the check for jobs to be retired. </description>
   </property>
 
   <property>
     <name>mapred.jobtracker.retirejob.interval</name>
     <value>21600000</value>
+    <description>Completed job retirement interval.</description>
   </property>
 
   <property>
     <name>mapred.job.tracker.history.completed.location</name>
     <value>/mapred/history/done</value>
-    <description>No description</description>
+    <description>The completed job history files are stored at this single well known location.</description>
   </property>
 
   <property>
@@ -437,6 +455,7 @@
   <property>
     <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
     <value>false</value>
+    <description>Enable this flag to create a _SUCCESS file for each successful job.</description>
   </property>
 
   <property>
@@ -476,6 +495,9 @@
   <property>
     <name>hadoop.job.history.user.location</name>
     <value>none</value>
+    <description>Location to store the history files of a particular job. If set to none, then the job histories are
+      not collected anywhere outside the master node.
+    </description>
     <final>true</final>
   </property>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/configuration/oozie-site.xml
index 57239c3..d8bb062 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/configuration/oozie-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/configuration/oozie-site.xml
@@ -15,223 +15,239 @@
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
--->     
+-->
 
 <configuration>
 
-<!--
-    Refer to the oozie-default.xml file for the complete list of
-    Oozie configuration properties and their default values.
--->
+  <!--
+      Refer to the oozie-default.xml file for the complete list of
+      Oozie configuration properties and their default values.
+  -->
   <property>
     <name>oozie.base.url</name>
     <value>http://localhost:11000/oozie</value>
     <description>Base Oozie URL.</description>
-   </property>
+  </property>
 
   <property>
     <name>oozie.system.id</name>
     <value>oozie-${user.name}</value>
     <description>
-    The Oozie system ID.
-    </description>
-   </property>
-
-   <property>
-     <name>oozie.systemmode</name>
-     <value>NORMAL</value>
-     <description>
-     System mode for  Oozie at startup.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.AuthorizationService.authorization.enabled</name>
-     <value>true</value>
-     <description>
-     Specifies whether security (user name/admin role) is enabled or not.
-     If disabled any user can manage Oozie system and manage any job.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.older.than</name>
-     <value>30</value>
-     <description>
-     Jobs older than this value, in days, will be purged by the PurgeService.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.purge.interval</name>
-     <value>3600</value>
-     <description>
-     Interval at which the purge service will run, in seconds.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.queue.size</name>
-     <value>1000</value>
-     <description>Max callable queue size</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.threads</name>
-     <value>10</value>
-     <description>Number of threads used for executing callables</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.callable.concurrency</name>
-     <value>3</value>
-     <description>
-     Maximum concurrency for a given callable type.
-     Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).
-     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-     All commands that use action executors (action-start, action-end, action-kill and action-check) use
-     the action type as the callable type.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.coord.normal.default.timeout</name>
-     <value>120</value>
-     <description>Default timeout for a coordinator action input check (in minutes) for normal job.
-      -1 means infinite timeout</description>
-   </property>
-
-   <property>
-     <name>oozie.db.schema.name</name>
-     <value>oozie</value>
-     <description>
+      The Oozie system ID.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.systemmode</name>
+    <value>NORMAL</value>
+    <description>
+      System mode for Oozie at startup.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.AuthorizationService.authorization.enabled</name>
+    <value>true</value>
+    <description>
+      Specifies whether security (user name/admin role) is enabled or not.
+      If disabled any user can manage Oozie system and manage any job.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.PurgeService.older.than</name>
+    <value>30</value>
+    <description>
+      Jobs older than this value, in days, will be purged by the PurgeService.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.PurgeService.purge.interval</name>
+    <value>3600</value>
+    <description>
+      Interval at which the purge service will run, in seconds.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.CallableQueueService.queue.size</name>
+    <value>1000</value>
+    <description>Max callable queue size</description>
+  </property>
+
+  <property>
+    <name>oozie.service.CallableQueueService.threads</name>
+    <value>10</value>
+    <description>Number of threads used for executing callables</description>
+  </property>
+
+  <property>
+    <name>oozie.service.CallableQueueService.callable.concurrency</name>
+    <value>3</value>
+    <description>
+      Maximum concurrency for a given callable type.
+      Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).
+      Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
+      All commands that use action executors (action-start, action-end, action-kill and action-check) use
+      the action type as the callable type.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.coord.normal.default.timeout</name>
+    <value>120</value>
+    <description>Default timeout for a coordinator action input check (in minutes) for normal job.
+      -1 means infinite timeout
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.db.schema.name</name>
+    <value>oozie</value>
+    <description>
       Oozie DataBase Name
-     </description>
-   </property>
+    </description>
+  </property>
 
-    <property>
-      <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-      <value> </value>
-      <description>
+  <property>
+    <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
+    <value></value>
+    <description>
       Whitelisted job tracker for Oozie service.
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.authentication.type</name>
-      <value>simple</value>
-      <description>
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-      <value> </value>
-      <description>
-      </description>
-    </property>
-
-    <property>
-      <name>oozie.service.WorkflowAppService.system.libpath</name>
-      <value>/user/${user.name}/share/lib</value>
-      <description>
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.authentication.type</name>
+    <value>simple</value>
+    <description>
+      Authentication type used for the Oozie HTTP endpoint. The supported values are: simple | kerberos |
+      #AUTHENTICATION_HANDLER_CLASSNAME#.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
+    <value></value>
+    <description>
+      Whitelisted NameNode for Oozie service.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.WorkflowAppService.system.libpath</name>
+    <value>/user/${user.name}/share/lib</value>
+    <description>
       System library path to use for workflow applications.
       This path is added to workflow application if their job properties sets
       the property 'oozie.use.system.libpath' to true.
-      </description>
-    </property>
+    </description>
+  </property>
 
-    <property>
-      <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-      <value>false</value>
-      <description>
+  <property>
+    <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
+    <value>false</value>
+    <description>
       If set to true, submissions of MapReduce and Pig jobs will include
       automatically the system library path, thus not requiring users to
       specify where the Pig JAR files are. Instead, the ones from the system
       library path are used.
-      </description>
-    </property>
-    <property>
-      <name>oozie.authentication.kerberos.name.rules</name>
-      <value>DEFAULT</value>
-      <description>The mapping from kerberos principal names to local OS user names.</description>
-    </property>
-    <property>
-      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-      <value>*=/etc/hadoop/conf</value>
-      <description>
-          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-          the relevant Hadoop *-site.xml files. If the path is relative is looked within
-          the Oozie configuration directory; though the path can be absolute (i.e. to point
-          to Hadoop client conf/ directories in the local filesystem.
-      </description>
-    </property>
-    <property>
-        <name>oozie.service.ActionService.executor.ext.classes</name>
-        <value>org.apache.oozie.action.email.EmailActionExecutor,
-org.apache.oozie.action.hadoop.HiveActionExecutor,
-org.apache.oozie.action.hadoop.ShellActionExecutor,
-org.apache.oozie.action.hadoop.SqoopActionExecutor,
-org.apache.oozie.action.hadoop.DistcpActionExecutor</value>
-    </property>
-
-    <property>
-        <name>oozie.service.SchemaService.wf.ext.schemas</name>
-        <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd</value>
-    </property>
-    <property>
-        <name>oozie.service.JPAService.create.db.schema</name>
-        <value>false</value>
-        <description>
-            Creates Oozie DB.
-
-            If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.
-            If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.driver</name>
-        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-        <description>
-            JDBC driver class.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.url</name>
-        <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-        <description>
-            JDBC URL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.username</name>
-        <value>oozie</value>
-        <description>
-            DB user name.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.password</name>
-        <value> </value>
-        <description>
-            DB user password.
-
-            IMPORTANT: if password is emtpy leave a 1 space string, the service trims the value,
-                       if empty Configuration assumes it is NULL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.pool.max.active.conn</name>
-        <value>10</value>
-        <description>
-             Max number of connections.
-        </description>
-    </property>
+    </description>
+  </property>
+  <property>
+    <name>oozie.authentication.kerberos.name.rules</name>
+    <value>DEFAULT</value>
+    <description>The mapping from kerberos principal names to local OS user names.</description>
+  </property>
+  <property>
+    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
+    <value>*=/etc/hadoop/conf</value>
+    <description>
+      Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
+      the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
+      the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
+      the Oozie configuration directory; the path can also be absolute (i.e. pointing
+      to Hadoop client conf/ directories in the local filesystem).
+    </description>
+  </property>
+  <property>
+    <name>oozie.service.ActionService.executor.ext.classes</name>
+    <value>org.apache.oozie.action.email.EmailActionExecutor,
+      org.apache.oozie.action.hadoop.HiveActionExecutor,
+      org.apache.oozie.action.hadoop.ShellActionExecutor,
+      org.apache.oozie.action.hadoop.SqoopActionExecutor,
+      org.apache.oozie.action.hadoop.DistcpActionExecutor
+    </value>
+    <description>
+      List of ActionExecutors extension classes (separated by commas). Only action types with associated executors can
+      be used in workflows. This property is a convenience property to add extensions to the built-in executors without
+      having to include all the built-in ones.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.SchemaService.wf.ext.schemas</name>
+    <value>
+      shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd
+    </value>
+    <description>
+      Schemas for additional action types. IMPORTANT: if there are no schemas, leave a single-space string; the service
+      trims the value, and if it is empty, Configuration assumes it is NULL.
+    </description>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.create.db.schema</name>
+    <value>false</value>
+    <description>
+      Creates the Oozie DB.
+
+      If set to true, the DB schema is created if it does not exist; if the schema already exists, this is a no-op.
+      If set to false, the DB schema is not created, and startup fails if the schema does not exist.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.jdbc.driver</name>
+    <value>org.apache.derby.jdbc.EmbeddedDriver</value>
+    <description>
+      JDBC driver class.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.jdbc.url</name>
+    <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
+    <description>
+      JDBC URL.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.jdbc.username</name>
+    <value>oozie</value>
+    <description>
+      DB user name.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.jdbc.password</name>
+    <value></value>
+    <description>
+      DB user password.
+
+      IMPORTANT: if the password is empty, leave a single-space string; the service trims the value,
+      and if it is empty, Configuration assumes it is NULL.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.pool.max.active.conn</name>
+    <value>10</value>
+    <description>
+      Max number of connections.
+    </description>
+  </property>
 </configuration>
\ No newline at end of file
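
[Editorial note] As a side note on the new oozie.service.HadoopAccessorService.hadoop.configurations description above, a minimal sketch of the AUTHORITY=HADOOP_CONF_DIR format might look as follows; the host name, port, and conf-dir paths are hypothetical and are not part of this patch:

  <property>
    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
    <!-- Per-authority conf dir first, wildcard '*' fallback last (illustrative values only). -->
    <value>nn1.example.com:8020=/etc/hadoop/conf-cluster1,*=/etc/hadoop/conf</value>
  </property>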

http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-site.xml
index bf4af7d..d1e933d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-site.xml
@@ -263,6 +263,9 @@
   <property>
     <name>hbase.security.authentication</name>
     <value>simple</value>
+    <description>Controls whether secure authentication is enabled for HBase. Possible values are 'simple'
+      (no authentication) and 'kerberos'.
+    </description>
   </property>
 
   <property>

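[Editorial note] For context on the new hbase.security.authentication description, a secured cluster would typically override this value to 'kerberos'. The snippet below is a hypothetical override, not part of this change:

  <property>
    <name>hbase.security.authentication</name>
    <!-- Hypothetical secure-cluster setting; the patch only documents the 'simple' default. -->
    <value>kerberos</value>
  </property>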
http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/core-site.xml
index e244fc7..356cba4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/core-site.xml
@@ -36,6 +36,8 @@
   <property>
     <name>io.serializations</name>
     <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
+    <description>A comma-delimited list of serialization classes that can be used for obtaining serializers and deserializers.
+    </description>
   </property>
 
   <property>

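[Editorial note] To illustrate the comma-delimited format the new io.serializations description refers to, a configuration that also registers the Avro serializers shipped with Hadoop might look like the sketch below (illustrative only, not part of this patch):

  <property>
    <name>io.serializations</name>
    <!-- Writable serialization plus the Avro serializers, comma-delimited (illustrative). -->
    <value>org.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization,org.apache.hadoop.io.serializer.avro.AvroReflectSerialization</value>
  </property>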
http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
index 79f6cd5..aee8236 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
@@ -491,6 +491,10 @@ don't exist, they will be created with this permission.</description>
   <property>
     <name>dfs.domain.socket.path</name>
     <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>
+      This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the TCP port of the DataNode.
+    </description>
   </property>
 
   <property>

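[Editorial note] As a sketch of the _PORT substitution mentioned in the new dfs.domain.socket.path description, a host running more than one DataNode could use a value like the one below; the path is hypothetical and not part of this patch:

  <property>
    <name>dfs.domain.socket.path</name>
    <!-- "_PORT" is replaced by each DataNode's TCP port, giving every DataNode its own socket file. -->
    <value>/var/lib/hadoop-hdfs/dn_socket._PORT</value>
  </property>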
http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
index b3ed5f5..899cfe9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
@@ -134,16 +134,22 @@ limitations under the License.
   <property>
     <name>hive.server2.enable.doAs</name>
     <value>true</value>
+    <description>Impersonate the connected user. By default, HiveServer2 performs query processing as the user who
+      submitted the query. If the parameter is set to false, the query runs as the user that the HiveServer2
+      process runs as.
+    </description>
   </property>
 
   <property>
     <name>fs.hdfs.impl.disable.cache</name>
     <value>true</value>
+    <description>Disable HDFS filesystem cache.</description>
   </property>
 
   <property>
     <name>fs.file.impl.disable.cache</name>
     <value>true</value>
+    <description>Disable local filesystem cache.</description>
   </property>
 
   <property>
@@ -167,11 +173,18 @@ limitations under the License.
   <property>
     <name>hive.optimize.bucketmapjoin</name>
     <value>true</value>
+    <description>If the tables being joined are bucketized on the join columns, and the number of buckets in one table
+      is a multiple of the number of buckets in the other table, the buckets can be joined with each other by setting
+      this parameter to true.
+    </description>
   </property>
 
   <property>
     <name>hive.optimize.bucketmapjoin.sortedmerge</name>
     <value>true</value>
+    <description> If the tables being joined are sorted and bucketized on the join columns, and they have the same number
+      of buckets, a sort-merge join can be performed by setting this parameter to true.
+    </description>
   </property>
 
   <property>
@@ -198,6 +211,7 @@ limitations under the License.
   <property>
     <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
     <value>true</value>
+    <description>Required to enable the conversion of an SMB (Sort-Merge-Bucket) join to a map-join SMB.</description>
   </property>
 
   <property>
@@ -249,11 +263,15 @@ limitations under the License.
   <property>
     <name>hive.vectorized.execution.enabled</name>
     <value>false</value>
+    <description>This flag controls the vectorized mode of query execution as documented in HIVE-4160 (as of Hive 0.13.0).
+    </description>
   </property>
 
   <property>
     <name>hive.optimize.reducededuplication</name>
     <value>true</value>
+    <description>Remove extra map-reduce jobs if the data is already clustered by the same key that needs to be used again.
+    </description>
   </property>
 
   <property>

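[Editorial note] To make the new hive.server2.enable.doAs description concrete: a deployment that wants all queries to run as the HiveServer2 service user rather than the submitting user would override the default. The snippet below is a hypothetical override, not part of this change:

  <property>
    <name>hive.server2.enable.doAs</name>
    <!-- Hypothetical override: disable impersonation so queries run as the HiveServer2 process user. -->
    <value>false</value>
  </property>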
http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
index ce12380..9389ed0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
@@ -27,11 +27,24 @@
   <property>
     <name>mapred.queue.default.acl-submit-job</name>
     <value>*</value>
+    <description>Comma-separated list of user and group names that are allowed
+      to submit jobs to the 'default' queue. The user list and the group list
+      are separated by a blank, e.g. alice,bob group1,group2.
+      If set to the special value '*', it means all users are allowed to
+      submit jobs.
+    </description>
   </property>
 
   <property>
     <name>mapred.queue.default.acl-administer-jobs</name>
     <value>*</value>
+    <description>Comma-separated list of user and group names that are allowed
+      to delete jobs or modify the priority of jobs not owned by the current
+      user in the 'default' queue. The user list and the group list
+      are separated by a blank, e.g. alice,bob group1,group2.
+      If set to the special value '*', it means all users are allowed to do
+      this operation.
+    </description>
   </property>
 
   <!-- END ACLs -->

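[Editorial note] To show the "users groups" value format that the new queue ACL descriptions spell out, a locked-down default queue could look like the sketch below; every user and group name here is made up:

  <property>
    <name>mapred.queue.default.acl-submit-job</name>
    <!-- Hypothetical: only alice, bob, and members of group1/group2 may submit to 'default'. -->
    <value>alice,bob group1,group2</value>
  </property>
  <property>
    <name>mapred.queue.default.acl-administer-jobs</name>
    <!-- Hypothetical: a single ops user plus an admin group may administer jobs. -->
    <value>opsuser hadoop-admins</value>
  </property>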
http://git-wip-us.apache.org/repos/asf/ambari/blob/8017fec5/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/mapred-site.xml
index 424d216..6691f8f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/MAPREDUCE2/configuration/mapred-site.xml
@@ -151,6 +151,9 @@
   <property>
     <name>mapreduce.map.output.compress</name>
     <value>false</value>
+    <description>
+      Determines whether the outputs of the maps are compressed before being sent across the network. Uses SequenceFile compression.
+    </description>
   </property>
 
   <property>
@@ -296,11 +299,13 @@
   <property>
     <name>mapreduce.admin.map.child.java.opts</name>
     <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <description>This property stores Java options for map tasks.</description>
   </property>
 
   <property>
     <name>mapreduce.admin.reduce.child.java.opts</name>
     <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <description>This property stores Java options for reduce tasks.</description>
   </property>
 
   <property>

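[Editorial note] Following up on the new mapreduce.map.output.compress description, enabling map-output compression usually goes hand in hand with choosing a codec; the override below is a sketch and is not part of this patch:

  <property>
    <name>mapreduce.map.output.compress</name>
    <!-- Hypothetical override: compress intermediate map output before it crosses the network. -->
    <value>true</value>
  </property>
  <property>
    <name>mapreduce.map.output.compress.codec</name>
    <!-- Codec choice is illustrative; SnappyCodec ships with Hadoop 2.x. -->
    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
  </property>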

[3/3] git commit: AMBARI-4449. Minor label change for Tez. (yusaku)

Posted by yu...@apache.org.
AMBARI-4449. Minor label change for Tez. (yusaku)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9eb89a2f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9eb89a2f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9eb89a2f

Branch: refs/heads/trunk
Commit: 9eb89a2fa1ce20259ab373819fe35122c63e581d
Parents: 4a008bd
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Tue Jan 28 19:26:33 2014 -0800
Committer: Yusaku Sako <yu...@hortonworks.com>
Committed: Tue Jan 28 19:32:06 2014 -0800

----------------------------------------------------------------------
 ambari-web/app/data/service_components.js | 2 +-
 ambari-web/app/data/services.js           | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9eb89a2f/ambari-web/app/data/service_components.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/service_components.js b/ambari-web/app/data/service_components.js
index 4590f07..232b328 100644
--- a/ambari-web/app/data/service_components.js
+++ b/ambari-web/app/data/service_components.js
@@ -103,7 +103,7 @@ module.exports = new Ember.Set([
   {
     service_name: 'TEZ',
     component_name: 'TEZ_CLIENT',
-    display_name: 'TEZ Client',
+    display_name: 'Tez Client',
     isMaster: false,
     isClient: true,
     description: ''

http://git-wip-us.apache.org/repos/asf/ambari/blob/9eb89a2f/ambari-web/app/data/services.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/data/services.js b/ambari-web/app/data/services.js
index 8da8346..ca696a7 100644
--- a/ambari-web/app/data/services.js
+++ b/ambari-web/app/data/services.js
@@ -62,7 +62,7 @@ module.exports = [
   },
   {
     serviceName: 'TEZ',
-    displayName: 'TEZ',
+    displayName: 'Tez',
     isDisabled: false,
     isSelected: true,
     canBeSelected: true,