You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by yu...@apache.org on 2013/03/20 21:44:50 UTC

svn commit: r1459041 [2/18] - in /incubator/ambari/branches/branch-1.2: ./ ambari-agent/ ambari-agent/conf/unix/ ambari-agent/src/main/puppet/modules/hdp-ganglia/files/ ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/ ambari-agent/src/main/p...

Modified: incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp (original)
+++ incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp Wed Mar 20 20:44:43 2013
@@ -85,10 +85,14 @@ class hdp-hadoop::namenode(
        service_state => $service_state
     }
 
+    hdp-hadoop::namenode::create_user_directories { 'create_user_directories' :
+       service_state => $service_state
+    }
+
     #top level does not need anchors
     Class['hdp-hadoop'] ->  Hdp-hadoop::Service['namenode']
     Hdp-hadoop::Namenode::Create_name_dirs<||> -> Hdp-hadoop::Service['namenode'] 
-    Hdp-hadoop::Service['namenode'] -> Hdp-hadoop::Namenode::Create_app_directories<||>
+    Hdp-hadoop::Service['namenode'] -> Hdp-hadoop::Namenode::Create_app_directories<||> -> Hdp-hadoop::Namenode::Create_user_directories<||>
     if ($service_state == 'running' and $format == true) {
       Class['hdp-hadoop'] -> Class['hdp-hadoop::namenode::format'] -> Hdp-hadoop::Service['namenode']
       Hdp-hadoop::Namenode::Create_name_dirs<||> -> Class['hdp-hadoop::namenode::format']
@@ -111,15 +115,8 @@ define hdp-hadoop::namenode::create_name
 
 define hdp-hadoop::namenode::create_app_directories($service_state)
 {
+
   if ($service_state == 'running') {
-    $smoke_test_user = $hdp::params::smokeuser
-    $smoke_hdfs_user_dir = $hdp::params::smoke_hdfs_user_dir
-    hdp-hadoop::hdfs::directory{ $smoke_hdfs_user_dir:
-      service_state => $service_state,
-      owner => $smoke_test_user,
-      mode  => '770',
-      recursive_chmod => true
-    }
    
     hdp-hadoop::hdfs::directory{ "/tmp" :
       service_state => $service_state,
@@ -147,46 +144,87 @@ define hdp-hadoop::namenode::create_app_
 
     if ($hdp::params::hive_server_host != "") {
       $hive_user = $hdp::params::hive_user
+      $hive_apps_whs_dir = $hdp::params::hive_apps_whs_dir
 
-      hdp-hadoop::hdfs::directory{ '/apps/hive/warehouse':
+      hdp-hadoop::hdfs::directory{ $hive_apps_whs_dir:
         service_state   => $service_state,
         owner            => $hive_user,
         mode             => '777',
         recursive_chmod  => true
       }
-      hdp-hadoop::hdfs::directory{ $hive_hdfs_user_dir:
+    }
+
+    if ($hdp::params::webhcat_server_host != "") {
+      $webhcat_user = $hdp::params::webhcat_user
+      $webhcat_apps_dir = $hdp::params::webhcat_apps_dir
+
+      hdp-hadoop::hdfs::directory{ $webhcat_apps_dir:
         service_state => $service_state,
-        owner         => $hive_user
+        owner => $webhcat_user,
+        mode  => '755',
+        recursive_chmod => true
       }
     }
+  }
+}
+
+
+define hdp-hadoop::namenode::create_user_directories($service_state)
+{
+  if ($service_state == 'running') {
+    $smoke_hdfs_user_dir = $hdp::params::smoke_hdfs_user_dir
+
+    $smoke_user_dir_item="$smoke_hdfs_user_dir,"
+
+    if ($hdp::params::hive_server_host != "") {
+      $hive_hdfs_user_dir = $hdp::params::hive_hdfs_user_dir
+      $hive_dir_item="$hive_hdfs_user_dir,"
+    } else {
+    $hive_dir_item=""
+    }
 
     if ($hdp::params::oozie_server != "") {
-      $oozie_user = $hdp::params::oozie_user
       $oozie_hdfs_user_dir = $hdp::params::oozie_hdfs_user_dir
-      hdp-hadoop::hdfs::directory{ $oozie_hdfs_user_dir:
-        service_state => $service_state,
-        owner => $oozie_user,
-        mode  => '775',
-        recursive_chmod => true
-      }
+      $oozie_dir_item="$oozie_hdfs_user_dir,"
+    } else {
+      $oozie_dir_item=""
     }
     
     if ($hdp::params::webhcat_server_host != "") {
-      $templeton_user = $hdp::params::templeton_user
       $hcat_hdfs_user_dir = $hdp::params::hcat_hdfs_user_dir
-      hdp-hadoop::hdfs::directory{ $hcat_hdfs_user_dir:
-        service_state => $service_state,
-        owner => $templeton_user,
-        mode  => '755',
-        recursive_chmod => true
-      }
-
-      hdp-hadoop::hdfs::directory{ '/apps/webhcat':
-        service_state => $service_state,
-        owner => $templeton_user,
-        mode  => '755',
-        recursive_chmod => true
+      $webhcat_hdfs_user_dir = $hdp::params::webhcat_hdfs_user_dir
+      $webhcat_dir_item="$webhcat_hdfs_user_dir,"
+      if ($hcat_hdfs_user_dir != webhcat_hdfs_user_dir) {
+        $hcat_dir_item="$hcat_hdfs_user_dir,"
+      } else {
+        $hcat_dir_item=""
       }
+    } else {
+      $webhcat_dir_item=""
     }
+
+    $users_dir_list_comm_sep = "$smoke_user_dir_item $hive_dir_item $oozie_dir_item $hcat_dir_item $webhcat_dir_item"
+
+    #Get unique users directories set
+    $users_dirs_set = hdp_set_from_comma_list($users_dir_list_comm_sep)
+
+    hdp-hadoop::namenode::create_user_directory{$users_dirs_set:
+      service_state => $service_state}
   }
+  
 }
+
+define hdp-hadoop::namenode::create_user_directory($service_state)
+{
+  
+  $owner = hdp_hadoop_get_owner($name)
+  $mode = hdp_hadoop_get_mode($name)
+  debug("## Creating user directory: $name, owner: $owner, mode: $mode")
+  hdp-hadoop::hdfs::directory{ $name:
+   service_state   => $service_state,
+   mode            => $mode,
+   owner           => $owner,
+   recursive_chmod => true
+  }
+}
+

Modified: incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp (original)
+++ incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp Wed Mar 20 20:44:43 2013
@@ -45,7 +45,7 @@ class hdp-hadoop::params(
     $enable_security_authorization = false
     $security_type = "simple"
     $task_controller = "org.apache.hadoop.mapred.DefaultTaskController"
-    $dfs_datanode_address = 50010
+    $dfs_datanode_address = 50075
     $dfs_datanode_http_address = 50075
   }
 
@@ -58,8 +58,8 @@ class hdp-hadoop::params(
 
   $hdfs_log_dir_prefix = hdp_default("hadoop/hadoop-env/hdfs_log_dir_prefix","/var/log/hadoop")
 
-  $hadoop_piddirprefix = hdp_default("hadoop/hadoop-env/hadoop_piddirprefix","/var/run/hadoop")
-  $run_dir = $hadoop_piddirprefix
+  $hadoop_pid_dir_prefix = hdp_default("hadoop/hadoop-env/hadoop_pid_dir_prefix","/var/run/hadoop")
+  $run_dir = $hadoop_pid_dir_prefix
 
   $namenode_formatted_mark_dir = "${run_dir}/hdfs/namenode/formatted/"
 

Modified: incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp (original)
+++ incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp Wed Mar 20 20:44:43 2013
@@ -30,7 +30,7 @@ define hdp-hadoop::service(
   $security_enabled = $hdp::params::security_enabled
 
   #NOTE does not work if namenode and datanode are on same host 
-  $pid_dir = "${hdp-hadoop::params::hadoop_piddirprefix}/${user}"
+  $pid_dir = "${hdp-hadoop::params::hadoop_pid_dir_prefix}/${user}"
   
   if (($security_enabled == true) and ($name == 'datanode')) {
     $run_as_root = true
@@ -40,7 +40,7 @@ define hdp-hadoop::service(
 
   if (($security_enabled == true) and ($name == 'datanode')) {
     $hdfs_user = $hdp::params::hdfs_user
-    $pid_file = "${hdp-hadoop::params::hadoop_piddirprefix}/${hdfs_user}/hadoop-${hdfs_user}-${name}.pid"
+    $pid_file = "${hdp-hadoop::params::hadoop_pid_dir_prefix}/${hdfs_user}/hadoop-${hdfs_user}-${name}.pid"
   } else {
     $pid_file = "${pid_dir}/hadoop-${user}-${name}.pid"
   } 

Modified: incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb (original)
+++ incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb Wed Mar 20 20:44:43 2013
@@ -78,8 +78,8 @@ export HADOOP_SECURE_DN_LOG_DIR=<%=scope
 # export HADOOP_SLAVE_SLEEP=0.1
 
 # The directory where pid files are stored. /tmp by default.
-export HADOOP_PID_DIR=<%=scope.function_hdp_template_var("hadoop_piddirprefix")%>/$USER
-export HADOOP_SECURE_DN_PID_DIR=<%=scope.function_hdp_template_var("hadoop_piddirprefix")%>/$HADOOP_SECURE_DN_USER
+export HADOOP_PID_DIR=<%=scope.function_hdp_template_var("hadoop_pid_dir_prefix")%>/$USER
+export HADOOP_SECURE_DN_PID_DIR=<%=scope.function_hdp_template_var("hadoop_pid_dir_prefix")%>/$HADOOP_SECURE_DN_USER
 
 # A string representing this instance of hadoop. $USER by default.
 export HADOOP_IDENT_STRING=$USER

Modified: incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/init.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/init.pp?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/init.pp (original)
+++ incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-hcat/manifests/init.pp Wed Mar 20 20:44:43 2013
@@ -23,6 +23,7 @@ class hdp-hcat(
 ) inherits hdp-hcat::params
 {
   $hcat_config_dir = $hdp-hcat::params::hcat_conf_dir
+  $hcat_pid_dir = $hdp-hcat::params::hcat_pid_dir
 
   if ($hdp::params::use_32_bits_on_slaves == false) {
     $size = 64
@@ -42,7 +43,12 @@ class hdp-hcat(
       force => true
     }
 
-    Hdp::Package['hcat'] -> Hdp::Directory[$hcat_config_dir]
+    hdp::directory { $hcat_pid_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    Hdp::Package['hcat'] -> Hdp::Directory[$hcat_config_dir] -> Hdp::Directory[$hcat_pid_dir]
 
   } elsif ($service_state == 'installed_and_configured') {
     hdp::package { 'hcat' : 
@@ -54,9 +60,22 @@ class hdp-hcat(
       force => true
     }
 
+    hdp::directory_recursive_create { $hcat_pid_dir:
+      owner => $webhcat_user,
+      service_state => $service_state,
+      force => true
+    }
+
+    hdp::user{ $webhcat_user:}
+
+    if ($webhcat_user != $hcat_user) {
+      hdp::user { $hcat_user:}
+    }
+
     hdp-hcat::configfile { 'hcat-env.sh':}
   
-    Hdp::Package['hcat'] -> Hdp::Directory[$hcat_config_dir] -> Hdp-hcat::Configfile<||> 
+    Hdp::Package['hcat'] -> Hdp::User<|title == $webhcat_user or title == $hcat_user|>  -> Hdp::Directory[$hcat_config_dir] -> Hdp::Directory_recursive_create[$hcat_pid_dir] -> Hdp-hcat::Configfile<||> 
+
  } else {
     hdp_fail("TODO not implemented yet: service_state = ${service_state}")
   }

Modified: incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server.pp?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server.pp (original)
+++ incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server.pp Wed Mar 20 20:44:43 2013
@@ -149,7 +149,7 @@ class hdp-nagios::server(
     if ($service_state == 'installed_and_configured') {
       $webserver_state = 'restart'
     } elsif ($service_state == 'running') {
-      $webserver_state = 'running'
+      $webserver_state = 'restart'
     } else {
       # We are never stopping httpd
       #$webserver_state = $service_state

Modified: incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb (original)
+++ incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-nagios/templates/hadoop-services.cfg.erb Wed Mar 20 20:44:43 2013
@@ -47,7 +47,7 @@ define service {
         use                     hadoop-service
         service_description     HDFS::Percent DataNodes storage full
         servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::Storage full"!10%!30%
+        check_command           check_aggregate!"DATANODE::DataNode storage full"!10%!30%
         normal_check_interval   2
         retry_check_interval    1 
         max_check_attempts      1
@@ -58,7 +58,7 @@ define service {
         use                     hadoop-service
         service_description     HDFS::Percent DataNodes down
         servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::Process down"!10%!30%
+        check_command           check_aggregate!"DATANODE::DataNode process down"!10%!30%
         normal_check_interval   0.5
         retry_check_interval    0.25
         max_check_attempts      3
@@ -70,7 +70,7 @@ define service {
         use                     hadoop-service
         service_description     MAPREDUCE::Percent TaskTrackers down
         servicegroups           MAPREDUCE
-        check_command           check_aggregate!"TASKTRACKER::Process down"!10%!30%
+        check_command           check_aggregate!"TASKTRACKER::TaskTracker process down"!10%!30%
         normal_check_interval   0.5
         retry_check_interval    0.25
         max_check_attempts      3
@@ -81,9 +81,9 @@ define service {
 define service {
         hostgroup_name          nagios-server
         use                     hadoop-service
-        service_description     ZOOKEEPER::Percent zookeeper servers down
+        service_description     ZOOKEEPER::Percent ZooKeeper Servers down
         servicegroups           ZOOKEEPER
-        check_command           check_aggregate!"ZKSERVERS::ZKSERVERS Process down"!35%!70%
+        check_command           check_aggregate!"ZOOKEEPER::ZooKeeper Server process down"!35%!70%
         normal_check_interval   0.5
         retry_check_interval    0.25
         max_check_attempts      3
@@ -95,9 +95,9 @@ define service {
 define service {
         hostgroup_name          nagios-server
         use                     hadoop-service
-        service_description     HBASE::Percent region servers down
+        service_description     HBASE::Percent RegionServers down
         servicegroups           HBASE
-        check_command           check_aggregate!"REGIONSERVER::Process down"!10%!30%
+        check_command           check_aggregate!"REGIONSERVER::RegionServer process down"!10%!30%
         normal_check_interval   0.5
         retry_check_interval    0.25
         max_check_attempts      3
@@ -110,7 +110,7 @@ define service {
 define service {
         hostgroup_name          ganglia-server
         use                     hadoop-service
-        service_description     GANGLIA::Ganglia [gmetad] Process down
+        service_description     GANGLIA::Ganglia [gmetad] process down
         servicegroups           GANGLIA
         check_command           check_tcp!8651!-w 1 -c 1
         normal_check_interval   0.25
@@ -121,7 +121,7 @@ define service {
 define service {
         hostgroup_name          ganglia-server
         use                     hadoop-service
-        service_description     GANGLIA::Ganglia collector [gmond] Process down alert for slaves
+        service_description     GANGLIA::Ganglia Collector [gmond] process down alert for slaves
         servicegroups           GANGLIA
         check_command           check_tcp!8660!-w 1 -c 1
         normal_check_interval   0.25
@@ -132,7 +132,7 @@ define service {
 define service {
         hostgroup_name          ganglia-server
         use                     hadoop-service
-        service_description     GANGLIA::Ganglia collector [gmond] Process down alert for namenode
+        service_description     GANGLIA::Ganglia Collector [gmond] process down alert for NameNode
         servicegroups           GANGLIA
         check_command           check_tcp!8661!-w 1 -c 1
         normal_check_interval   0.25
@@ -143,7 +143,7 @@ define service {
 define service {
         hostgroup_name          ganglia-server
         use                     hadoop-service
-        service_description     GANGLIA::Ganglia collector [gmond] Process down alert for jobtracker
+        service_description     GANGLIA::Ganglia Collector [gmond] process down alert for JobTracker
         servicegroups           GANGLIA
         check_command           check_tcp!8662!-w 1 -c 1
         normal_check_interval   0.25
@@ -155,7 +155,7 @@ define service {
 define service {
         hostgroup_name          ganglia-server
         use                     hadoop-service
-        service_description     GANGLIA::Ganglia collector [gmond] Process down alert for hbasemaster
+        service_description     GANGLIA::Ganglia Collector [gmond] process down alert for HBase Master
         servicegroups           GANGLIA
         check_command           check_tcp!8663!-w 1 -c 1
         normal_check_interval   0.25
@@ -170,7 +170,7 @@ define service {
 define service {
         hostgroup_name          snamenode
         use                     hadoop-service
-        service_description     NAMENODE::Secondary Namenode Process down
+        service_description     NAMENODE::Secondary NameNode process down
         servicegroups           HDFS
         check_command           check_tcp!50090!-w 1 -c 1
         normal_check_interval   0.5
@@ -183,7 +183,7 @@ define service {
 define service {
         hostgroup_name          namenode
         use                     hadoop-service
-        service_description     NAMENODE::Namenode Web UI down
+        service_description     NAMENODE::NameNode Web UI down
         servicegroups           HDFS
         check_command           check_webui!namenode
         normal_check_interval   1
@@ -194,7 +194,7 @@ define service {
 define service {
         hostgroup_name          namenode
         use                     hadoop-service
-        service_description     NAMENODE::Namenode Edit logs directory status
+        service_description     NAMENODE::NameNode edit logs directory status
         servicegroups           HDFS
         check_command           check_name_dir_status!50070
         normal_check_interval   0.5
@@ -205,7 +205,7 @@ define service {
 define service {        
         hostgroup_name          namenode        
         use                     hadoop-service
-        service_description     NAMENODE::Namenode Host CPU utilization
+        service_description     NAMENODE::NameNode host CPU utilization
         servicegroups           HDFS
         check_command           check_cpu!200%!250%
         normal_check_interval   5
@@ -217,7 +217,7 @@ define service {        
 define service {
         hostgroup_name          namenode
         use                     hadoop-service
-        service_description     NAMENODE::Namenode Process down
+        service_description     NAMENODE::NameNode process down
         servicegroups           HDFS
         check_command           check_tcp!8020!-w 1 -c 1
         normal_check_interval   0.5
@@ -239,7 +239,7 @@ define service {
 define service {
         hostgroup_name          namenode
         use                     hadoop-service
-        service_description     HDFS::HDFS Capacity utilization
+        service_description     HDFS::HDFS capacity utilization
         servicegroups           HDFS
         check_command           check_hdfs_capacity!50070!80%!90%
         normal_check_interval   10
@@ -250,7 +250,7 @@ define service {
 define service {
         hostgroup_name          namenode
         use                     hadoop-service
-        service_description     HDFS::Namenode RPC Latency
+        service_description     HDFS::NameNode RPC latency
         servicegroups           HDFS
         check_command           check_rpcq_latency!NameNode!50070!3000!5000
         normal_check_interval   5
@@ -286,7 +286,7 @@ define service {
 define service {
         hostgroup_name          jobtracker
         use                     hadoop-service
-        service_description     JOBTRACKER::Jobtracker CPU utilization
+        service_description     JOBTRACKER::JobTracker CPU utilization
         servicegroups           MAPREDUCE
         check_command           check_cpu!200%!250%
         normal_check_interval   5
@@ -298,7 +298,7 @@ define service {
 define service {
         hostgroup_name          jobtracker
         use                     hadoop-service
-        service_description     JOBTRACKER::Jobtracker Process down
+        service_description     JOBTRACKER::JobTracker process down
         servicegroups           MAPREDUCE
         check_command           check_tcp!50030!-w 1 -c 1
         normal_check_interval   0.5
@@ -309,13 +309,26 @@ define service {
 define service {
         hostgroup_name          jobtracker
         use                     hadoop-service
-        service_description     MAPREDUCE::JobTracker RPC Latency
+        service_description     MAPREDUCE::JobTracker RPC latency
         servicegroups           MAPREDUCE
         check_command           check_rpcq_latency!JobTracker!50030!3000!5000
         normal_check_interval   5
         retry_check_interval    1 
         max_check_attempts      5
 }
+
+# MAPREDUCE::TASKTRACKER Checks 
+define service {
+        hostgroup_name          slaves
+        use                     hadoop-service
+        service_description     TASKTRACKER::TaskTracker process down
+        servicegroups           MAPREDUCE
+        check_command           check_tcp!50060!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
 <%end-%>
 
 <%if scope.function_hdp_nagios_members_exist('slaves')-%>
@@ -323,7 +336,7 @@ define service {
 define service {
         hostgroup_name          slaves
         use                     hadoop-service
-        service_description     DATANODE::Process down
+        service_description     DATANODE::DataNode process down
         servicegroups           HDFS
         check_command           check_tcp!<%=scope.function_hdp_template_var("dfs_datanode_address")%>!-w 1 -c 1
         normal_check_interval   1
@@ -334,7 +347,7 @@ define service {
 define service {
         hostgroup_name          slaves
         use                     hadoop-service
-        service_description     DATANODE::Storage full
+        service_description     DATANODE::DataNode storage full
         servicegroups           HDFS
         check_command           check_datanode_storage!<%=scope.function_hdp_template_var("dfs_datanode_http_address")%>!90%!90%
         normal_check_interval   5
@@ -342,17 +355,6 @@ define service {
         max_check_attempts      2
 }
 
-# MAPREDUCE::TASKTRACKER Checks 
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     TASKTRACKER::Process down
-        servicegroups           MAPREDUCE
-        check_command           check_tcp!50060!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
 <%end-%>
 
 <%if scope.function_hdp_nagios_members_exist('zookeeper-servers')-%>
@@ -360,7 +362,7 @@ define service {
 define service {
         hostgroup_name          zookeeper-servers
         use                     hadoop-service
-        service_description     ZKSERVERS::ZKSERVERS Process down
+        service_description     ZOOKEEPER::ZooKeeper Server process down
         servicegroups           ZOOKEEPER
         check_command           check_tcp!2181!-w 1 -c 1
         normal_check_interval   1
@@ -374,7 +376,7 @@ define service {
 define service {
         hostgroup_name          region-servers
         use                     hadoop-service
-        service_description     REGIONSERVER::Process down
+        service_description     REGIONSERVER::RegionServer process down
         servicegroups           HBASE
         check_command           check_tcp!60020!-w 1 -c 1
         normal_check_interval   1
@@ -386,7 +388,7 @@ define service {
 define service {
         hostgroup_name          hbasemaster
         use                     hadoop-service
-        service_description     HBASEMASTER::HBase Web UI down
+        service_description     HBASEMASTER::HBase Master Web UI down
         servicegroups           HBASE
         check_command           check_webui!hbase
         normal_check_interval   1
@@ -397,7 +399,7 @@ define service {
 define service {
         hostgroup_name          hbasemaster
         use                     hadoop-service
-        service_description     HBASEMASTER::HBaseMaster CPU utilization
+        service_description     HBASEMASTER::HBase Master CPU utilization
         servicegroups           HBASE
         check_command           check_cpu!200%!250%
         normal_check_interval   5
@@ -408,7 +410,7 @@ define service {
 define service {
         hostgroup_name          hbasemaster
         use                     hadoop-service
-        service_description     HBASEMASTER::HBaseMaster Process down
+        service_description     HBASEMASTER::HBase Master process down
         servicegroups           HBASE
         check_command           check_tcp!60000!-w 1 -c 1
         normal_check_interval   0.5
@@ -422,7 +424,7 @@ define service {
 define service {
         hostgroup_name          hiveserver
         use                     hadoop-service
-        service_description     HIVE-METASTORE::HIVE-METASTORE status check
+        service_description     HIVE-METASTORE::Hive Metastore status check
         servicegroups           HIVE-METASTORE
         <%if scope.function_hdp_template_var("security_enabled")-%>
         check_command           check_hive_metastore_status!9083!<%=scope.function_hdp_template_var("java64_home")%>!true!<%=scope.function_hdp_template_var("keytab_path")%>/<%=scope.function_hdp_template_var("nagios_user")%>.headless.keytab!<%=scope.function_hdp_template_var("nagios_user")%>
@@ -439,7 +441,7 @@ define service {
 define service {
         hostgroup_name          oozie-server
         use                     hadoop-service
-        service_description     OOZIE::Oozie status check
+        service_description     OOZIE::Oozie Server status check
         servicegroups           OOZIE
         <%if scope.function_hdp_template_var("security_enabled")-%>
         check_command           check_oozie_status!11000!<%=scope.function_hdp_template_var("java64_home")%>!true!<%=scope.function_hdp_template_var("keytab_path")%>/<%=scope.function_hdp_template_var("nagios_user")%>.headless.keytab!<%=scope.function_hdp_template_var("nagios_user")%>
@@ -456,7 +458,7 @@ define service {
 define service {
         hostgroup_name          webhcat-server
         use                     hadoop-service
-        service_description     WEBHCAT::WEBHCAT status check
+        service_description     WEBHCAT::WebHCat Server status check
         servicegroups           WEBHCAT 
         <%if scope.function_hdp_template_var("security_enabled")-%>
         check_command           check_templeton_status!50111!v1!true!<%=scope.function_hdp_template_var("keytab_path")%>/<%=scope.function_hdp_template_var("nagios_user")%>.headless.keytab!<%=scope.function_hdp_template_var("nagios_user")%>

Modified: incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/server.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/server.pp?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/server.pp (original)
+++ incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/server.pp Wed Mar 20 20:44:43 2013
@@ -75,7 +75,8 @@ class hdp-templeton::server(
 
 class hdp-templeton::copy-hdfs-directories($service_state)
 {
- $templeton_user = $hdp-templeton::params::templeton_user
+ $webhcat_apps_dir = $hdp::params::webhcat_apps_dir
+ $webhcat_user = $hdp::params::webhcat_user
 # $pig_src_tar = "$hdp::params::artifact_dir/pig.tar.gz"
 
 #  hdp-hadoop::hdfs::copyfromlocal { '/usr/share/templeton/templeton*jar':
@@ -86,22 +87,22 @@ class hdp-templeton::copy-hdfs-directori
 #  }
   hdp-hadoop::hdfs::copyfromlocal { '/usr/lib/hadoop/contrib/streaming/hadoop-streaming*.jar':
    service_state => $service_state,
-   owner => $hdp-templeton::params::templeton_user,
+   owner => $webhcat_user,
    mode  => '755',
-   dest_dir => '/apps/webhcat/hadoop-streaming.jar'
+   dest_dir => "$webhcat_apps_dir/hadoop-streaming.jar"
   }
   #TODO: Use ${hdp::params::artifact_dir}/${hdp-templeton::params::pig_tar_name} instead
   hdp-hadoop::hdfs::copyfromlocal { '/usr/share/HDP-webhcat/pig.tar.gz' :
     service_state => $service_state,
-    owner => $hdp-templeton::params::templeton_user,
+    owner => $webhcat_user,
     mode  => '755',
-    dest_dir => '/apps/webhcat/pig.tar.gz'
+    dest_dir => "$webhcat_apps_dir/pig.tar.gz"
   }
   #TODO: Use ${hdp::params::artifact_dir}/${hdp-templeton::params::hive_tar_name} instead
   hdp-hadoop::hdfs::copyfromlocal { '/usr/share/HDP-webhcat/hive.tar.gz' :
     service_state => $service_state,
-    owner => $hdp-templeton::params::templeton_user,
+    owner => $webhcat_user,
     mode  => '755',
-    dest_dir => '/apps/webhcat/hive.tar.gz'
+    dest_dir => "$webhcat_apps_dir/hive.tar.gz"
   }
 }

Modified: incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/templates/webhcat-env.sh.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/templates/webhcat-env.sh.erb?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/templates/webhcat-env.sh.erb (original)
+++ incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp-templeton/templates/webhcat-env.sh.erb Wed Mar 20 20:44:43 2013
@@ -21,7 +21,7 @@
 #
 
 # The file containing the running pid
-PID_FILE=<%=scope.function_hdp_template_var("templeton_pid_dir")%>/webhcat.pid
+PID_FILE=<%=scope.function_hdp_template_var("hcat_pid_dir")%>/webhcat.pid
 
 TEMPLETON_LOG_DIR=<%=scope.function_hdp_template_var("hcat_log_dir")%>/
 
@@ -41,4 +41,4 @@ CONSOLE_LOG=<%=scope.function_hdp_templa
 #HCAT_PREFIX=<%=scope.function_hdp_template_var("hive_prefix")%>/
 
 # Set HADOOP_HOME to point to a specific hadoop install directory
-HADOOP_HOME=/usr/lib/hadoop
+export HADOOP_HOME=/usr/lib/hadoop

Added: incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_set_from_comma_list.rb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_set_from_comma_list.rb?rev=1459041&view=auto
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_set_from_comma_list.rb (added)
+++ incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_set_from_comma_list.rb Wed Mar 20 20:44:43 2013
@@ -0,0 +1,31 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#to handle differences in how args passed in
+require 'set'
+module Puppet::Parser::Functions
+  newfunction(:hdp_set_from_comma_list, :type => :rvalue) do |args|
+    list = function_hdp_array_from_comma_list(args)
+    list.each_index {|i| list [i]=list [i].strip}
+    #Delete empty strings
+    list.reject! { |e| e.empty? }
+    list.uniq   
+  end
+end
\ No newline at end of file

Modified: incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp (original)
+++ incubator/ambari/branches/branch-1.2/ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp Wed Mar 20 20:44:43 2013
@@ -122,6 +122,7 @@ class hdp::params()
 
   $hive_user = hdp_default("hive_user","hive")
   $hcat_user = hdp_default("hcat_user","hcat")
+  $webhcat_user = hdp_default("webhcat_user","hcat")
 
   $oozie_user = hdp_default("oozie_user","oozie")
   $templeton_user = hdp_default("templeton_user","hcat")
@@ -132,12 +133,22 @@ class hdp::params()
   $smokeuser = hdp_default("smokeuser","ambari_qa")
   $smoke_user_group = hdp_default("smoke_user_group","users")
   
-  ############ Hdfs directories
-  $hbase_hdfs_root_dir = hdp_default("hadoop/hbase-site/hbase_hdfs_root_dir","/apps/hbase/data")
-  $oozie_hdfs_user_dir = hdp_default("oozie_hdfs_user_dir", "/user/oozie")
-  $hcat_hdfs_user_dir = hdp_default("hcat_hdfs_user_dir", "/user/hcat")
-  $hive_hdfs_user_dir = hdp_default("hive_hdfs_user_dir", "/user/hive")
+  ############ Hdfs users directories
+  $oozie_hdfs_user_dir = hdp_default("oozie_hdfs_user_dir", "/user/${oozie_user}")
+  $oozie_hdfs_user_mode = 775
+  $hcat_hdfs_user_dir = hdp_default("hcat_hdfs_user_dir", "/user/${hcat_user}")
+  $hcat_hdfs_user_mode = 755
+  $webhcat_hdfs_user_dir = hdp_default("hcat_hdfs_user_dir", "/user/${webhcat_user}")
+  $webhcat_hdfs_user_mode = 755
+  $hive_hdfs_user_dir = hdp_default("hive_hdfs_user_dir", "/user/${hive_user}")
+  $hive_hdfs_user_mode = 700
   $smoke_hdfs_user_dir = hdp_default("smoke_hdfs_user_dir", "/user/${smokeuser}")
+  $smoke_hdfs_user_mode = 770
+  
+  ############ Hdfs apps directories
+  $hive_apps_whs_dir = hdp_default("hive_apps_whs_dir", "/apps/hive/warehouse")
+  $webhcat_apps_dir = hdp_default("webhcat_apps_dir", "/apps/webhcat")
+  $hbase_hdfs_root_dir = hdp_default("hadoop/hbase-site/hbase_hdfs_root_dir","/apps/hbase/data")
 
   #because of Puppet user resource issue make sure that $hadoop_user is different from user_group
   if ($security_enabled == true) {

Modified: incubator/ambari/branches/branch-1.2/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py (original)
+++ incubator/ambari/branches/branch-1.2/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py Wed Mar 20 20:44:43 2013
@@ -140,7 +140,7 @@ servicesToPidNames = {
   'GANGLIA_MONITOR': 'gmond.pid',
   'HBASE_MASTER': 'hbase-[A-Za-z0-9_]+-master.pid',
   'HBASE_REGIONSERVER': 'hbase-[A-Za-z0-9_]+-regionserver.pid',
-  'HCATALOG_SERVER': 'hcat.pid',
+  'HCATALOG_SERVER': 'webhcat.pid',
   'KERBEROS_SERVER': 'kadmind.pid',
   'HIVE_SERVER': 'hive-server.pid',
   'HIVE_METASTORE': 'hive.pid',

Modified: incubator/ambari/branches/branch-1.2/ambari-agent/src/test/python/TestHostname.py
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-agent/src/test/python/TestHostname.py?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-agent/src/test/python/TestHostname.py (original)
+++ incubator/ambari/branches/branch-1.2/ambari-agent/src/test/python/TestHostname.py Wed Mar 20 20:44:43 2013
@@ -20,7 +20,7 @@ limitations under the License.
 
 from unittest import TestCase
 import ambari_agent.hostname as hostname
-from ambari_agent.AmbariConfig import AmbariConfig
+import ambari_agent.AmbariConfig as AmbariConfig
 import socket
 import tempfile
 import shutil
@@ -29,25 +29,29 @@ import os, pprint, json,stat
 class TestHostname(TestCase):
 
   def test_hostname(self):
-    self.assertEquals(hostname.hostname(), socket.gethostname(), "hostname should equal the socket-based hostname")
+    self.assertEquals(hostname.hostname(), socket.getfqdn(), 
+                      "hostname should equal the socket-based hostname")
     pass
 
   def test_hostname_override(self):
-    tmpname = tempfile.mkstemp(text=True)[1]
+    fd = tempfile.mkstemp(text=True)
+    tmpname = fd[1]
+    os.close(fd[0])
     os.chmod(tmpname, os.stat(tmpname).st_mode | stat.S_IXUSR)
 
     tmpfile = file(tmpname, "w+")
 
+    config = AmbariConfig.config
     try:
       tmpfile.write("#!/bin/sh\n\necho 'test.example.com'")
       tmpfile.close()
 
-      config = AmbariConfig().getConfig()
       config.set('agent', 'hostname_script', tmpname)
 
       self.assertEquals(hostname.hostname(), 'test.example.com', "expected hostname 'test.example.com'")
     finally:
       os.remove(tmpname)
+      config.remove_option('agent', 'hostname_script')
 
     pass
 

Modified: incubator/ambari/branches/branch-1.2/ambari-project/pom.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-project/pom.xml?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-project/pom.xml (original)
+++ incubator/ambari/branches/branch-1.2/ambari-project/pom.xml Wed Mar 20 20:44:43 2013
@@ -17,14 +17,17 @@
   <parent>
     <groupId>org.apache.ambari</groupId>
     <artifactId>ambari</artifactId>
-    <version>1.2.1-SNAPSHOT</version>
+    <version>1.2.2-SNAPSHOT</version>
   </parent>
   <groupId>org.apache.ambari</groupId>
   <artifactId>ambari-project</artifactId>
-  <version>1.2.1-SNAPSHOT</version>
+  <version>1.2.2-SNAPSHOT</version>
   <description>Apache Ambari Project POM</description>
   <name>Apache Ambari Project POM</name>
   <packaging>pom</packaging>
+  <properties>
+    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+  </properties>
   <pluginRepositories>
     <pluginRepository>
       <id>maven2-repository.dev.java.net</id>
@@ -361,6 +364,10 @@
     </pluginManagement>
     <plugins>
       <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.0</version>
+      </plugin>
+      <plugin>
         <groupId>org.apache.rat</groupId>
         <artifactId>apache-rat-plugin</artifactId>
         <configuration combine.self="override"/>

Modified: incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/clusters-cluster.md
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/clusters-cluster.md?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/clusters-cluster.md (original)
+++ incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/clusters-cluster.md Wed Mar 20 20:44:43 2013
@@ -1,4 +1,4 @@
-<!---
+	<!---
 Licensed to the Apache Software Foundation (ASF) under one or more
 contributor license agreements. See the NOTICE file distributed with
 this work for additional information regarding copyright ownership.
@@ -28,97 +28,98 @@ Returns information for the specified cl
 
     200 OK
     {
-      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster",
-      "Clusters" : {
-        "cluster_name" : "MyCluster",
-        "cluster_id" : 1,
-        "version" : "HDP-1.2.0"
-      },
-      "services" : [
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/NAGIOS",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "NAGIOS"
-          }
-        },
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HCATALOG",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "HCATALOG"
-          }
-        },
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/PIG",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "PIG"
-          }
-        },
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/MAPREDUCE",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "MAPREDUCE"
-          }
-        },
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/GANGLIA",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "GANGLIA"
-          }
-        },
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HIVE",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "HIVE"
-          }
-        },
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS",
-        "ServiceInfo" : {
-          "cluster_name" : "MyIE9",
-          "service_name" : "HDFS"
-          }
-        },
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/ZOOKEEPER",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "ZOOKEEPER"
-          }
-        },
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HBASE",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "HBASE"
-          }
-        },
-        {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/OOZIE",
-        "ServiceInfo" : {
-          "cluster_name" : "MyCluster",
-          "service_name" : "OOZIE"
-          }
-        } ],
-    "hosts" : [
-      {
-      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/hosts/some.cluster.host",
-      "Hosts" : {
-        "cluster_name" : "MyCluster",
-        "host_name" : "some.cluster.host"
-        }
-      },
-      {
-      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/hosts/another.cluster.host",
-      "Hosts" : {
-        "cluster_name" : "MyCluster",
-        "host_name" : "another.cluster.host"
-        }
-      } ]
+    	"href" : "http://your.ambari.server/api/v1/clusters/c1",
+      	"Clusters" : {
+        	"cluster_name" : "c1",
+        	"cluster_id" : 1,
+        	"version" : "HDP-1.2.0"
+      	},
+      	"services" : [
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/NAGIOS",
+        		"ServiceInfo" : {
+          			"cluster_name" : "c1",
+          			"service_name" : "NAGIOS"
+          		}
+        	},
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HCATALOG",
+        		"ServiceInfo" : {
+          			"cluster_name" : "c1",
+          			"service_name" : "HCATALOG"
+          		}
+        	},
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/PIG",
+        		"ServiceInfo" : {
+          			"cluster_name" : "c1",
+         			"service_name" : "PIG"
+          		}
+        	},
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/MAPREDUCE",
+        		"ServiceInfo" : {
+          			"cluster_name" : "c1",
+          			"service_name" : "MAPREDUCE"
+          		}
+        	},
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/GANGLIA",
+        		"ServiceInfo" : {
+          			"cluster_name" : "c1",
+          			"service_name" : "GANGLIA"
+          		}
+        	},
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HIVE",
+        		"ServiceInfo" : {
+          			"cluster_name" : "c1",
+          			"service_name" : "HIVE"
+          		}
+        	},
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS",
+        		"ServiceInfo" : {
+          			"cluster_name" : "MyIE9",
+          			"service_name" : "HDFS"
+          		}
+        	},
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/ZOOKEEPER",
+        		"ServiceInfo" : {
+          			"cluster_name" : "c1",
+         	 		"service_name" : "ZOOKEEPER"
+          		}
+        	},
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HBASE",
+        		"ServiceInfo" : {
+          			"cluster_name" : "c1",
+          			"service_name" : "HBASE"
+          		}
+        	},
+        	{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/OOZIE",
+        		"ServiceInfo" : {
+          			"cluster_name" : "c1",
+          			"service_name" : "OOZIE"
+          		}
+        	} 
+    	],
+    	"hosts" : [
+      		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1",
+      			"Hosts" : {
+        			"cluster_name" : "c1",
+        			"host_name" : "some.cluster.host"
+        		}
+      		},
+      		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host2",
+      		"Hosts" : {
+        		"cluster_name" : "c1",
+        		"host_name" : "another.cluster.host"
+        	}
+      		}
+        ]
     }
 

Modified: incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/clusters.md
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/clusters.md?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/clusters.md (original)
+++ incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/clusters.md Wed Mar 20 20:44:43 2013
@@ -1,3 +1,4 @@
+
 <!---
 Licensed to the Apache Software Foundation (ASF) under one or more
 contributor license agreements. See the NOTICE file distributed with
@@ -29,11 +30,13 @@ Returns a collection of the currently co
     200 OK
     {
       "href" : "http://your.ambari.server/api/v1/clusters",
-      "items" : [ {
-        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster",
-        "Clusters" : {
-          "cluster_name" : "MyCluster",
-          "version" : "HDP-1.2.0"
-        }
-      } ]
-    }
+      "items" : [ 
+      		{
+        		"href" : "http://your.ambari.server/api/v1/clusters/c1",
+        		"Clusters" : {
+          			"cluster_name" : "c1",
+          			"version" : "HDP-1.2.0"
+        		}
+      		} 	
+    	]
+	}

Modified: incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/components-component.md
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/components-component.md?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/components-component.md (original)
+++ incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/components-component.md Wed Mar 20 20:44:43 2013
@@ -28,47 +28,58 @@ Refers to a specific component identifie
 
     200 OK
     {
-    "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/DATANODE",
-    "metrics" : {
-      "rpc" : {
-        ...
-      },
-      "dfs" : {
-        "datanode" : {
-          ...
-        }
-      },
-      "disk" : {
-        ...
-      },
-      "cpu" : {
-        ...
-      },
-      "jvm" : {
-        ...
-      },
-      "load" : {
-        ...
-      },
-      "memory" : {
-        ...
-      },
-      "network" : {
-        ...
-      },
-    },
-    "ServiceComponentInfo" : {
-      "cluster_name" : "MyCluster",
-      "component_name" : "DATANODE",
-      "service_name" : "HDFS"
-    },
-    "host_components" : [
-      {
-      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/hosts/some.cluster.host/host_components/DATANODE",
-      "HostRoles" : {
-        "cluster_name" : "MyCluster",
-        "component_name" : "DATANODE",
-        "host_name" : "some.cluster.host"
-        }
-      } ]
+    	"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/DATANODE",
+    	"metrics" : {
+    		"process" : {
+    			...    
+    		},
+      		"rpc" : {
+        		...
+      		},
+      		"ugi" : {
+      			...
+      		},
+      		"dfs" : {
+        		"datanode" : {
+          		...
+        		}
+      		},
+      		"disk" : {
+        		...
+      		},
+      		"cpu" : {
+        		...
+      		},
+      		"rpcdetailed" : {
+      			...
+      		},
+      		"jvm" : {
+        		...
+      		},
+      		"load" : {
+        		...
+      		},
+      		"memory" : {
+        		...
+      		},
+      		"network" : {
+        		...
+      		},
+    	},
+    	"ServiceComponentInfo" : {
+      		"cluster_name" : "c1",
+      		"component_name" : "DATANODE",
+        			"service_name" : "HDFS",
+      		"state" : "STARTED"
+    	},
+    	"host_components" : [
+      		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1/host_components/DATANODE",
+      			"HostRoles" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "DATANODE",
+        			"host_name" : "host1"
+        		}
+      		}
+       	]
     }

Modified: incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/components.md
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/components.md?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/components.md (original)
+++ incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/components.md Wed Mar 20 20:44:43 2013
@@ -28,38 +28,39 @@ Refers to a collection of all components
 
     200 OK
     {
-    "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components",
-    "items" : [
-      {
-      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/DATANODE",
-      "ServiceComponentInfo" : {
-        "cluster_name" : "MyCluster",
-        "component_name" : "DATANODE",
-        "service_name" : "HDFS"
-        }
-      },
-      {
-      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/SECONDARY_NAMENODE",
-      "ServiceComponentInfo" : {
-        "cluster_name" : "MyCluster",
-        "component_name" : "SECONDARY_NAMENODE",
-        "service_name" : "HDFS"
-        }
-      },
-      {
-      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/NAMENODE",
-      "ServiceComponentInfo" : {
-        "cluster_name" : "MyCluster",
-        "component_name" : "NAMENODE",
-        "service_name" : "HDFS"
-        }
-      },
-      {
-      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/HDFS_CLIENT",
-      "ServiceComponentInfo" : {
-        "cluster_name" : "MyCluster",
-        "component_name" : "HDFS_CLIENT",
-        "service_name" : "HDFS"
-        }
-      } ]
+    	"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components",
+    	"items" : [
+      		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/DATANODE",
+      			"ServiceComponentInfo" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "DATANODE",
+        			"service_name" : "HDFS"
+        		}
+      		},
+      		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/SECONDARY_NAMENODE",
+      			"ServiceComponentInfo" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "SECONDARY_NAMENODE",
+        			"service_name" : "HDFS"
+        		}
+      		},
+      		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/NAMENODE",
+      			"ServiceComponentInfo" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "NAMENODE",
+        			"service_name" : "HDFS"
+        		}
+      		},
+      		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/HDFS_CLIENT",
+      			"ServiceComponentInfo" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "HDFS_CLIENT",
+        			"service_name" : "HDFS"
+        		}
+      		}
+      	]
     }

Modified: incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/host-component.md
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/host-component.md?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/host-component.md (original)
+++ incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/host-component.md Wed Mar 20 20:44:43 2013
@@ -27,3 +27,57 @@ Returns information for a specific role 
 **Response**
 
     200 OK
+    {
+    	"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1/host_components/DATANODE",
+    	"HostRoles" : {
+    		"cluster_name" : "c1",
+      		"component_name" : "DATANODE",
+      		"host_name" : "host1",
+      		"state" : "STARTED"
+    	},
+    	"host" : {
+    		"href" : "http://localhost:8080/api/v1/clusters/c1/hosts/dev.hortonworks.com"
+    	},
+    	"metrics" : {
+    		"process" : {
+    			...    
+    		},
+      		"ugi" : {
+      			...
+      		},
+      		"dfs" : {
+        		"datanode" : {
+          		...
+        		}
+      		},
+      		"disk" : {
+        		...
+      		},
+      		"cpu" : {
+        		...
+      		},
+      		"jvm" : {
+        		...
+      		},
+      		"load" : {
+        		...
+      		},
+      		"memory" : {
+        		...
+      		},
+      		"network" : {
+        		...
+      		},
+    	},
+    	"component" : [
+      		{
+    	      	"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HDFS/components/DATANODE",
+      			"ServiceComponentInfo" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "DATANODE",
+        			"service_name" : "HDFS"
+        		}
+      		}
+       	]
+    }
+

Modified: incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/host-components.md
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/host-components.md?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/host-components.md (original)
+++ incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/host-components.md Wed Mar 20 20:44:43 2013
@@ -27,4 +27,31 @@ Returns a collection of components runni
 **Response**
 
     200 OK
-
+    {
+    	"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1/host_components",
+    	"items" : [
+    		{
+      			"href" : "your.ambari.server/api/v1/clusters/c1/hosts/host1/host_components/DATANODE",
+      			"HostRoles" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "DATANODE",
+        			"host_name" : "host1"
+      			},
+      			"host" : {
+        			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1"
+      			}
+    		},
+			{
+      			"href" : "your.ambari.server/api/v1/clusters/c1/hosts/host1/host_components/HBASE_CLIENT",
+      			"HostRoles" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "HBASE_CLIENT",
+        			"host_name" : "host1"
+      			},
+      			"host" : {
+        			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1"
+      			}
+    		},
+    		...
+		]
+	}
\ No newline at end of file

Modified: incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/hosts-host.md
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/hosts-host.md?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/hosts-host.md (original)
+++ incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/hosts-host.md Wed Mar 20 20:44:43 2013
@@ -27,4 +27,74 @@ Returns information about a single host 
 **Response**
 
     200 OK
+    {
+    	"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1",
+    	"metrics" : {
+    		"process" : {
+    			...    
+    		},
+      		"rpc" : {
+        		...
+      		},
+      		"ugi" : {
+      			...
+      		},
+      		"disk" : {
+        		...
+      		},
+      		"cpu" : {
+        		...
+      		},
+      		"rpcdetailed" : {
+      			...
+      		},
+      		"jvm" : {
+        		...
+      		},
+      		"load" : {
+        		...
+      		},
+      		"memory" : {
+        		...
+      		},
+      		"network" : {
+        		...
+      		},
+    	},
+    	"Hosts" : {
+      		"cluster_name" : "c1",
+      		"host_name" : "host1",
+      		"host_state" : "HEALTHY",
+      		"public_host_name" : "host1.yourDomain.com",
+      		"cpu_count" : 1,
+      		"rack_info" : "rack-name",
+      		"os_arch" : "x86_64",
+      		"disk_info" : [
+      			{
+      				"available" : "41497444",
+        			"used" : "9584560",
+        			"percent" : "19%",
+        			"size" : "51606140",
+        			"type" : "ext4",
+       	 			"mountpoint" : "/"
+      			}
+      		],
+      		"ip" : "10.0.2.15",
+      		"os_type" : "rhel6",
+      		"total_mem" : 2055208,
+      		...        	      		
+    	},
+    	"host_components" : [
+      		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1/host_components/DATANODE",
+      			"HostRoles" : {
+        			"cluster_name" : "c1",
+        			"component_name" : "DATANODE",
+        			"host_name" : "host1"
+        		}
+      		},
+      		...
+       	]
+    }
+
 

Modified: incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/hosts.md
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/hosts.md?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/hosts.md (original)
+++ incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/hosts.md Wed Mar 20 20:44:43 2013
@@ -27,3 +27,22 @@ Returns a collection of all hosts in a g
 **Response**
 
     200 OK
+    {
+    	"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/",
+    	"items" : [
+    		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host1",
+      			"Hosts" : {
+        			"cluster_name" : "c1",
+        			"host_name" : "host1"
+      			}
+    		},
+    		{
+      			"href" : "http://your.ambari.server/api/v1/clusters/c1/hosts/host2",
+      			"Hosts" : {
+        			"cluster_name" : "c1",
+        			"host_name" : "host2"
+      			}
+    		}
+    	]
+	}  	

Modified: incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/index.md
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/index.md?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/index.md (original)
+++ incubator/ambari/branches/branch-1.2/ambari-server/docs/api/v1/index.md Wed Mar 20 20:44:43 2013
@@ -18,7 +18,7 @@ limitations under the License.
 Ambari API Reference v1
 =========
 
-The Ambari API provides access to monitoring and metrics information of a Apache Hadoop cluster. This document describes the resources used in the Ambari API and is intended for developers who want to integrate with Ambari.
+The Ambari API provides access to monitoring and metrics information of an Apache Hadoop cluster. This document describes the resources and syntax used in the Ambari API and is intended for developers who want to integrate with Ambari.
 
 - [Release Version](#release-version)
 - [Authentication](#authentication)
@@ -30,7 +30,7 @@ The Ambari API provides access to monito
 
 Release Version
 ----
-_Last Updated December 28, 2012_
+_Last Updated February 13, 2013_
 
 Authentication
 ----
@@ -54,58 +54,82 @@ There are 2 types of resources in the Am
 
 - **Instance Resource:** This resource type refers to a single specific resource. For example:
 
-        /clusters/MyCluster
+        /clusters/c1
 
-  _Refers to the cluster resource identified by the id "MyCluster"_
-
-### Clusters
+  _Refers to the cluster resource identified by the id "c1"_
 
+### clusters
 - [List clusters](clusters.md)
 - [View cluster information](clusters-cluster.md)
 
-### Services
-
+### services
 - [List services](services.md)
 - [View service information](services-service.md)
-- [View service components](components.md)
-- [View component information](components-component.md)
 
-### Hosts
+### components
+- [List service components](components.md)
+- [View component information](components-component.md)
 
+### hosts
 - [List hosts](hosts.md)
 - [View host information](hosts-host.md)
+
+### host_components
 - [List host components](host-components.md)
 - [View host component information](host-component.md)
 
 Partial Response
 ----
 
-A mechanism used to control which fields are returned by a query.  Partial response can be used to restrict which fields are returned and additionally, it allows a query to reach down and return data from sub-resources.  The keyword “fields” is used to specify a partial response.  Only the fields listed will be returned to the client.  To specify sub-elements, use the notation “a/b/c”.  The wildcard ‘*’ can be used to show all fields for a resource.  This can be combined to provide ‘expand’ functionality for sub-components.  Some fields are always returned for a resource regardless of the specified partial response fields.  These fields are the fields which uniquely identify the resource.  This would be the primary id field of the resource and the foreign keys to the primary id fields of all ancestors of the resource.
+Used to control which fields are returned by a query.  Partial response can be used to restrict which fields are returned and additionally, it allows a query to reach down and return data from sub-resources.  The keyword “fields” is used to specify a partial response.  Only the fields specified will be returned to the client.  To specify sub-elements, use the notation “a/b/c”.  Properties, categories and sub-resources can be specified.  The wildcard ‘*’ can be used to show all categories, fields and sub-resources for a resource.  This can be combined to provide ‘expand’ functionality for sub-components.  Some fields are always returned for a resource regardless of the specified partial response fields.  These fields are the fields that uniquely identify the resource.  This would be the primary id field of the resource and the foreign keys to the primary id fields of all ancestors of the resource.
 
-**Example: Partial Response (Name and All metrics)*
+**Example: Using Partial Response to restrict response to a specific field**
 
-    GET    /api/v1/clusters/MyCluster/services/HDFS/components/NAMENODE?fields=name,metrics
+    GET    /api/v1/clusters/c1/services/HDFS/components/NAMENODE?fields=metrics/disk/disk_total
 
+    200 OK
	{
    	“href”: “.../api/v1/clusters/c1/services/HDFS/components/NAMENODE?fields=metrics/disk/disk_total”,
    	“ServiceComponentInfo” : {
        	“cluster_name” : “c1”,
        	“component_name” : NAMENODE”,
        	“service_name” : “HDFS”
    	},
    	“metrics” : {
        	"disk" : {       
            	"disk_total" : 100000
        	}
    	}
    }
+
**Example: Using Partial Response to restrict response to specified category**
 
-    200 OK
-    {
-      “href” :”.../api/v1/clusters/MyCluster/services/HDFS/components/NAMENODE?fields=name,metrics”,
-      “name”: “NAMENODE”,
-      “metrics”: [
-        {
-        ...
-        }
-      ]
-    }
+    GET    /api/v1/clusters/c1/services/HDFS/components/NAMENODE?fields=metrics/disk
+
+    200 OK
	{
    	“href”: “.../api/v1/clusters/c1/services/HDFS/components/NAMENODE?fields=metrics/disk”,
    	“ServiceComponentInfo” : {
        	“cluster_name” : “c1”,
        	“component_name” : NAMENODE”,
        	“service_name” : “HDFS”
    	},
    	“metrics” : {
        	"disk" : {       
            	"disk_total" : 100000,
            	“disk_free” : 50000,
            	“part_max_used” : 1010
        	}
    	}
	}
+
+**Example – Using Partial Response to restrict response to multiple fields/categories**
+
+	GET	/api/v1/clusters/c1/services/HDFS/components/NAMENODE?fields=metrics/disk/disk_total,metrics/cpu
+	
+	200 OK
	{
    	“href”: “.../api/v1/clusters/c1/services/HDFS/components/NAMENODE?fields=metrics/disk/disk_total,metrics/cpu”,
    	“ServiceComponentInfo” : {
        	“cluster_name” : “c1”,
        	“component_name” : NAMENODE”,
        	“service_name” : “HDFS”
    	},
    	“metrics” : {
        	"disk" : {       
            	"disk_total" : 100000
        	},
        	“cpu” : {
            	“cpu_speed” : 10000000,
            	“cpu_num” : 4,
            	“cpu_idle” : 999999,
            	...
        	}
    	}
	}

**Example – Using Partial Response to restrict response to a sub-resource**
+
	GET	/api/v1/clusters/c1/hosts/host1?fields=host_components
+
	200 OK
	{
    	“href”: “.../api/v1/clusters/c1/hosts/host1?fields=host_components”,
    	“Hosts” : {
        	“cluster_name” : “c1”,
        	“host_name” : “host1”
    	},
    	“host_components”: [
        	{
            	“href”: “…/api/v1/clusters/c1/hosts/host1/host_components/NAMENODE”
            	“HostRoles” : {
                	“cluster_name” : “c1”,
                	“component_name” : “NAMENODE”,
                	“host_name” : “host1”
            	}
        	},
        	{
            	“href”: “…/api/v1/clusters/c1/hosts/host1/host_components/DATANODE”
            	“HostRoles” : {
                	“cluster_name” : “c1”,
                	“component_name” : DATANODE”,
                	“host_name” : “host1”
            	}
        	},
+            ... 
    	]
	}

**Example – Using Partial Response to expand a sub-resource one level deep**
+
	GET	/api/v1/clusters/c1/hosts/host1?fields=host_components/*
+
	200 OK
	{
    	“href”: “.../api/v1/clusters/c1/hosts/host1?fields=host_components/*”,
    	“Hosts” : {
        	“cluster_name” : “c1”,
        	“host_name” : “host1”
        },
+        “host_components”: [
        	{
            	“href”: “…/api/v1/clusters/c1/hosts/host1/host_components/DATANODE”
            	“HostRoles” : {
                	“cluster_name” : “c1”,
               		“component_name” : DATANODE”,
                	“host_name” : “host1”,
                	“state” : “RUNNING”,
                	...
            	},        
            	"host" : {     
                	"href" : ".../api/v1/clusters/c1/hosts/host1"  
            	},
            	“metrics” : {
                	"disk" : {       
                    	"disk_total" : 100000000,       
                    	"disk_free" : 5000000,       
                    	"part_max_used" : 10101     
                	},
                	...
            	},
            	"component" : {
                	"href" : "http://ambari.server/api/v1/clusters/c1/services/HDFS/components/NAMENODE", 
                	“ServiceComponentInfo” : {
                    	"cluster_name" : "c1",         
                    	"component_name" : "NAMENODE",         
                    	"service_name" : "HDFS"       
                	}
            	}  
        	},
        	...
    	]
	}

+**Example – Using Partial Response for multi-level expansion of sub-resources**
+	
+	GET /api/v1/clusters/c1/hosts/host1?fields=host_components/component/*
+	
+	200 OK
	{
    	“href”: “http://ambari.server/api/v1/clusters/c1/hosts/host1?fields=host_components/*”,
    	“Hosts” : {
        	“cluster_name” : “c1”,
        	“host_name” : “host1”
        	...
    	},
+    	“host_components”: [
+    		{
            	“href”: “…/api/v1/clusters/c1/hosts/host1/host_components/DATANODE”,
            	“HostRoles” : {
                	“cluster_name” : “c1”,
                	“component_name” : DATANODE”,
                	“host_name” : “host1”
            	}, 
            	"component" : {
                	"href" : "http://ambari.server/api/v1/clusters/c1/services/HDFS/components/DATANODE", 
                	“ServiceComponentInfo” : {
                   		"cluster_name" : "c1",         
                    	"component_name" : "DATANODE",         
                    	"service_name" : "HDFS"  
                    	...     
                	},
             		“metrics”: {
                   		“dfs”: {
                       		“datanode” : {
          	                	“blocks_written " :  10000,
          	                	“blocks_read" : 5000,
                             	...
                        	}
                    	},
                    	“disk”: {
                       		"disk_total " :  1000000,
                        	“disk_free" : 50000,
                        	...
                    	},
                   		... 	
+					}
            	}
        	},
        	{
            	“href”: “…/api/v1/clusters/c1/hosts/host1/host_components/NAMENODE”,
            	“HostRoles” : {
                	“cluster_name” : “c1”,
                	“component_name” : NAMENODE”,
                	“host_name” : “host1”
            	}, 
            	"component" : {
                	"href" : "http://ambari.server/api/v1/clusters/c1/services/HDFS/components/NAMENODE", 
                	“ServiceComponentInfo” : {
                   		"cluster_name" : "c1",         
                    	"component_name" : "NAMENODE",         
                    	"service_name" : "HDFS"       
                	},
             		“metrics”: {
                    	“dfs”: {
                       		“namenode” : {
          	            		“FilesRenamed " :  10,
          	            		“FilesDeleted" : 5
                         		…
                    		}
+						},	
                    	“disk”: {
                       		"disk_total " :  1000000,
                       		“disk_free" : 50000,
                        	...
                    	}
                	},
                	...
            	}
        	},
        	...
    	]
	}

**Example: Using Partial Response to expand collection resource instances one level deep**
+
	GET /api/v1/clusters/c1/hosts?fields=*
+
	200 OK
	{
    	“href” : “http://ambari.server/api/v1/clusters/c1/hosts/?fields=*”,    
    	“items”: [ 
        	{
            	“href” : “http://ambari.server/api/v1/clusters/c1/hosts/host1”,
            	“Hosts” : {
                	“cluster_name” :  “c1”,
                	“host_name” : “host1”
            	},
            	“metrics”: {
                	“process”: {          	    
                   		"proc_total" : 1000,
          	       		"proc_run" : 1000
                	},
                	...
            	},
            	“host_components”: [
                	{
                   		“href”: “…/api/v1/clusters/c1/hosts/host1/host_components/NAMENODE”
                    	“HostRoles” : {
                       		“cluster_name” : “c1”,
                         	“component_name” : “NAMENODE”,
                        	“host_name” : “host1”
                    	}
                	},
                	{
                    	“href”: “…/api/v1/clusters/c1/hosts/host1/host_components/DATANODE”
                    	“HostRoles” : {
                       		“cluster_name” : “c1”,
                        	“component_name” : DATANODE”,
                        	“host_name” : “host1”
                    	}
                	},
                	...
            	},
            	...
        	},
        	{
            	“href” : “http://ambari.server/api/v1/clusters/c1/hosts/host2”,
            	“Hosts” : {
                	“cluster_name” :  “c1”,
                	“host_name” : “host2”
            	},
            	“metrics”: {
               		“process”: {          	    
                   		"proc_total" : 555,
          	     		"proc_run" : 55
                	},
                	...
            	},
            	“host_components”: [
                	{
                   		“href”: “…/api/v1/clusters/c1/hosts/host1/host_components/DATANODE”
                    	“HostRoles” : {
                       		“cluster_name” : “c1”,
                        	“component_name” : “DATANODE”,
                        	“host_name” : “host2”
                    	}
                	},
                	...
            	],
            	...
        	},
        	...
    	]
	}

### Additional Partial Response Examples
+
**Example – For each cluster, get cluster name, all hostnames and all service names**
+
	GET   /api/v1/clusters?fields=Clusters/cluster_name,hosts/Hosts/host_name,services/ServiceInfo/service_name
+
**Example - Get all hostnames for a given component**
+
	GET	/api/v1/clusters/c1/services/HDFS/components/DATANODE?fields=host_components/HostRoles/host_name
+
**Example - Get all hostnames and component names for a given service**
+
	GET	/api/v1/clusters/c1/services/HDFS?fields=components/host_components/HostRoles/host_name,
+                                      	          components/host_components/HostRoles/component_name


 
-Query Parameters
+Query Predicates
 ----
 
-This mechanism limits which data is returned by a query based on a predicate(s). Providing query parameters does not result in any link expansion in the data that is returned to the client although it may result in expansion on the server to apply predicates on sub-objects.
+Used to limit which data is returned by a query.  This is synonymous with the “where” clause in a SQL query.  Providing query parameters does not result in any link expansion in the data that is returned, with the exception of the fields used in the predicates.  Query predicates can only be applied to collection resources.  A predicate consists of at least one relational expression.  Predicates with multiple relational expressions also contain logical operators that connect the relational expressions.  Predicates may also use brackets for explicit grouping of expressions. 
 
-_Note: Only applies to collection resources. And all URLs must be properly URL encoded_
-
-**Query Operators**
+###Relational Query Operators
 
 <table>
   <tr>
@@ -115,58 +139,169 @@ _Note: Only applies to collection resour
   </tr>
   <tr>
     <td>=</td>
-    <td>name=host1</th>
-    <td>String or numerical equals</td>
+    <td>name=host1</td>
+    <td>String or numerical EQUALS</td>
   </tr>
   <tr>
     <td>!=</td>
-    <td>host!=host1</th>
-    <td>String or numerical not equals</td>
+    <td>name!=host1</td>
+    <td>String or numerical NOT EQUALS</td>
   </tr>
   <tr>
     <td>&lt;</td>
-    <td>disk_total&lt;50</th>
-    <td>Numerical less than</td>
+    <td>disk_total&lt;50</td>
+    <td>Numerical LESS THAN</td>
   </tr>
   <tr>
     <td>&gt;</td>
-    <td>disk_total&gt;50</th>
-    <td>Numerical greater than</td>
+    <td>disk_total&gt;50</td>
+    <td>Numerical GREATER THAN</td>
   </tr>
   <tr>
     <td>&lt;=</td>
-    <td>disk_total&lt;=50</th>
-    <td>Numerical less than or equals</td>
+    <td>disk_total&lt;=50</td>
+    <td>Numerical LESS THAN OR EQUALS</td>
   </tr>
   <tr>
     <td>&gt;=</td>
-    <td>disk_total&gt;=50</th>
-    <td>Numerical greater than or equals</td>
+    <td>disk_total&gt;=50</td>
+    <td>Numerical GREATER THAN OR EQUALS</td>
+  </tr>  
+</table>
+
+###Logical Query Operators
+
+<table>
+  <tr>
+    <th>Operator</th>
+    <th>Example</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>|</td>
+    <td>name=host1|name=host2</td>
+    <td>Logical OR operator</td>
+  </tr>
+  <tr>
+    <td>&</td>
+    <td>prop1=foo&prop2=bar</td>
+    <td>Logical AND operator</td>
   </tr>
   <tr>
-    <td>or</td>
-    <td>disk_total&gt;50 or disk_free&lt;100</th>
-    <td>Logial 'or'</td>
+    <td>!</td>
+    <td>!prop<50</td>
+    <td>Logical NOT operator</td>
   </tr>
 </table>
 
-**Example: Get all hosts with less than 100 "disk_total"**
+**Logical Operator Precedence**
 
-    GET  /api/v1/clusters/c1/hosts?metrics/disk/disk_total<100
+Standard logical operator precedence rules apply.  The above logical operators are listed in order of precedence starting with the lowest priority.  
 
-Errors
+###Brackets
+
+<table>
+  <tr>
+    <th>Bracket</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>(</td>
+    <td>Opening Bracket</td>
+  </tr>
+  <tr>
+    <td>)</td>
+    <td>Closing Bracket</td>
+  </tr>
+
+</table>
+  
+Brackets can be used to provide explicit grouping of expressions. Expressions within brackets have the highest precedence.
+
+###Operator Functions
+ 
+<table>
+  <tr>
+    <th>Operator</th>
+    <th>Example</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>in()</td>
+    <td>name.in(foo,bar)</td>
+    <td>IN function.  More compact form of name=foo|name=bar. </td>
+  </tr>
+  <tr>
+    <td>isEmpty()</td>
+    <td>category.isEmpty()</td>
+    <td>Used to determine if a category contains any properties. </td>
+  </tr>
+</table>
+Operator functions behave like relational operators and provide additional functionality.  Some operator functions, such as in(), act as binary operators like the above relational operators, where there is a left and right operand.  Some operator functions are unary operators, such as isEmpty(), where there is only a single operand.
+
+###Query Examples
+
+**Example – Get all hosts with “HEALTHY” status that have 2 or more cpu**
+	
+	GET	/api/v1/clusters/c1/hosts?Hosts/host_status=HEALTHY&Hosts/cpu_count>=2
+	
+**Example – Get all hosts with less than 2 cpu or host status != HEALTHY**
	
+
	GET	/api/v1/clusters/c1/hosts?Hosts/cpu_count<2|Hosts/host_status!=HEALTHY

**Example – Get all “rhel6” hosts with less than 2 cpu or “centos6” hosts with 3 or more cpu**  
+
	GET	/api/v1/clusters/c1/hosts?Hosts/os_type=rhel6&Hosts/cpu_count<2|Hosts/os_type=centos6&Hosts/cpu_count>=3

**Example – Get all hosts where either state != “HEALTHY” or last_heartbeat_time < 1360600135905 and rack_info=”default_rack”**

	GET	/api/v1/clusters/c1/hosts?(Hosts/host_status!=HEALTHY|Hosts/last_heartbeat_time<1360600135905)
+                                  &Hosts/rack_info=default_rack

**Example – Get hosts with host name of host1 or host2 or host3 using IN operator**
	
+	GET	/api/v1/clusters/c1/hosts?Hosts/host_name.in(host1,host2,host3)

**Example – Get and expand all HDFS components, which have at least 1 property in the “metrics/jvm” category (combines query and partial response syntax)**

	GET	/api/v1/clusters/c1/services/HDFS/components?!metrics/jvm.isEmpty()&fields=*

Temporal Metrics
+----
+
+Some metrics have values that are available across a range in time.  To query a metric for a range of values, the following partial response syntax is used.  

To get temporal data for a single property:
?fields=category/property[start-time,end-time,step]	

To get temporal data for all properties in a category:
?fields=category[start-time,end-time,step]

start-time: Required field.  The start time for the query in Unix epoch time format.
end-time: Optional field, defaults to now.  The end time for the query in Unix epoch time format.
step: Optional field, defaults to the corresponding metrics system’s default value.  If provided, end-time must also be provided. The interval of time between returned data points specified in seconds. The larger the value provided, the fewer data points returned so this can be used to limit how much data is returned for the given time range.  This is only used as a suggestion so the result interval may differ from the one specified.

The returned result is a list of data points over the specified time range.  Each data point is a value / timestamp pair.

**Note**: It is important to understand that requesting large amounts of temporal data may result in severe performance degradation.  **Always** request the minimal amount of information necessary.  If large amounts of data are required, consider splitting the request up into multiple smaller requests.
+
**Example – Temporal Query for a single property using only start-time**
+
	GET	/api/v1/clusters/c1/hosts/host1?fields=metrics/jvm/gcCount[1360610225]
+
	
+	200 OK
	{
    	“href” : …/api/v1/clusters/c1/hosts/host1?fields=metrics/jvm/gcCount[1360610225]”,
    	...
    	“metrics”: [
        	{
            	“jvm”: {
          	    	"gcCount" : [
                   		[10, 1360610165],
                     	[12, 1360610180],
                     	[13, 1360610195],
                     	[14, 1360610210],
                     	[15, 1360610225]
                  	]
             	}
         	}
    	]
	}

**Example – Temporal Query for a category using start-time, end-time and step**
+
	GET	/api/v1/clusters/c1/hosts/host1?fields=metrics/jvm[1360610200,1360610500,100]
+
	200 OK
	{
    	“href” : …/clusters/c1/hosts/host1?fields=metrics/jvm[1360610200,1360610500,100]”,
    	...
    	“metrics”: [
        	{
            	“jvm”: {
          	    	"gcCount" : [
                   		[10, 1360610200],
                     	[12, 1360610300],
                     	[13, 1360610400],
                     	[14, 1360610500]
                  	],
                	"gcTimeMillis" : [
                   		[1000, 1360610200],
                     	[2000, 1360610300],
                     	[5000, 1360610400],
                     	[9500, 1360610500]
                  	],
                  	...
             	}
         	}
    	]
	}

	

+
+HTTP Return Codes
 ----
 
-This section describes how errors are represented in a response.
+The following HTTP codes may be returned by the API.
+<table>
+  <tr>
+    <th>HTTP CODE</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>200</td>
+    <td>OK</td>  
+  </tr>
+  <tr>
+    <td>400</td>
+    <td>Bad Request</td>  
+  </tr>
+  <tr>
+    <td>401</td>
+    <td>Unauthorized</td>  
+  </tr>
+  <tr>
+    <td>403</td>
+    <td>Forbidden</td>  
+  </tr> 
+  <tr>
+    <td>404</td>
+    <td>Not Found</td>  
+  </tr>
+  <tr>
+    <td>500</td>
+    <td>Internal Server Error</td>  
+  </tr>
+</table>
+
+
+Errors
+----
 
-**Response**
+**Example error responses**
 
-    404 Not Found
-    {
-      “status”: 404,
-      “message”: “standard message”,
-      “developerMessage”: “verbose developers message”,
-      “code”: 1234,
-      “moreInfo”, “...”
-    }
+    404 Not Found
	{   
    	"status" : 404,   
    	"message" : "The requested resource doesn't exist: Cluster not found, clusterName=someInvalidCluster" 
	} 
+
&nbsp;

	400 Bad Request
	{   
    	"status" : 400,   
    	"message" : "The properties [foo] specified in the request or predicate are not supported for the 
+                	 resource type Cluster."
	}