Posted to commits@ambari.apache.org by yu...@apache.org on 2013/03/20 21:44:50 UTC

svn commit: r1459041 [10/18] - in /incubator/ambari/branches/branch-1.2: ./ ambari-agent/ ambari-agent/conf/unix/ ambari-agent/src/main/puppet/modules/hdp-ganglia/files/ ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/ ambari-agent/src/main/...

Added: incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml?rev=1459041&view=auto
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml (added)
+++ incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml Wed Mar 20 20:44:43 2013
@@ -0,0 +1,195 @@
+<?xml version="1.0"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- This is the configuration file for the resource manager in Hadoop. -->
+<!-- You can configure various scheduling parameters related to queues. -->
+<!-- The properties for a queue follow a naming convention, such as -->
+<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
+
+<configuration>
+
+  <property>
+    <name>mapred.capacity-scheduler.maximum-system-jobs</name>
+    <value>3000</value>
+    <description>Maximum number of jobs in the system which can be initialized
+     concurrently by the CapacityScheduler.
+    </description>    
+  </property>
+  
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.capacity</name>
+    <value>100</value>
+    <description>Percentage of the number of slots in the cluster that are
+      to be available for jobs in this queue.
+    </description>    
+  </property>
+  
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.maximum-capacity</name>
+    <value>-1</value>
+    <description>
+      maximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.
+      This provides a means to limit how much excess capacity a queue can use. By default, there is no limit.
+      The maximum-capacity of a queue can only be greater than or equal to its minimum capacity.
+      The default value of -1 implies a queue can use the complete capacity of the cluster.
+
+      This property can be used to curtail long-running jobs from occupying more than a
+      certain percentage of the cluster, which, in the absence of pre-emption, could lead
+      to the capacity guarantees of other queues being affected.
+
+      One important thing to note is that maximum-capacity is a percentage, so the absolute
+      maximum capacity changes with the cluster's capacity. If a large number of nodes or
+      racks is added to the cluster, the maximum capacity in absolute terms increases accordingly.
+    </description>
+  </property>
+  
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.supports-priority</name>
+    <value>false</value>
+    <description>If true, priorities of jobs will be taken into 
+      account in scheduling decisions.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
+    <value>100</value>
+    <description> Each queue enforces a limit on the percentage of resources 
+    allocated to a user at any given time, if there is competition for them. 
+    This user limit can vary between a minimum and maximum value. The former
+    depends on the number of users who have submitted jobs, and the latter is
+    set to this property value. For example, suppose the value of this 
+    property is 25. If two users have submitted jobs to a queue, no single 
+    user can use more than 50% of the queue resources. If a third user submits
+    a job, no single user can use more than 33% of the queue resources. With 4 
+    or more users, no user can use more than 25% of the queue's resources. A 
+    value of 100 implies no user limits are imposed. 
+    </description>
+  </property>
+  
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.user-limit-factor</name>
+    <value>1</value>
+    <description>The multiple of the queue capacity which can be configured to 
+    allow a single user to acquire more slots. 
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks</name>
+    <value>200000</value>
+    <description>The maximum number of tasks, across all jobs in the queue, 
+    which can be initialized concurrently. Once the queue's jobs exceed this 
+    limit they will be queued on disk.  
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user</name>
+    <value>100000</value>
+    <description>The maximum number of tasks per user, across all of the
+    user's jobs in the queue, which can be initialized concurrently. Once the 
+    user's jobs exceed this limit they will be queued on disk.  
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.init-accept-jobs-factor</name>
+    <value>10</value>
+    <description>The multiple of (maximum-system-jobs * queue-capacity) used to 
+    determine the number of jobs which are accepted by the scheduler.  
+    </description>
+  </property>
+
+  <!-- The default configuration settings for the capacity task scheduler -->
+  <!-- The default values would be applied to all the queues which don't have -->
+  <!-- the appropriate property for the particular queue -->
+  <property>
+    <name>mapred.capacity-scheduler.default-supports-priority</name>
+    <value>false</value>
+    <description>If true, priorities of jobs will be taken into 
+      account in scheduling decisions by default in a job queue.
+    </description>
+  </property>
+  
+  <property>
+    <name>mapred.capacity-scheduler.default-minimum-user-limit-percent</name>
+    <value>100</value>
+    <description>The default percentage of the queue's resources to which a
+      single user is limited at any given point in time.
+    </description>
+  </property>
+
+
+  <property>
+    <name>mapred.capacity-scheduler.default-user-limit-factor</name>
+    <value>1</value>
+    <description>The default multiple of queue-capacity which is used to 
+    determine the number of slots a single user can consume concurrently.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-queue</name>
+    <value>200000</value>
+    <description>The default maximum number of tasks, across all jobs in the 
+    queue, which can be initialized concurrently. Once the queue's jobs exceed 
+    this limit they will be queued on disk.  
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-user</name>
+    <value>100000</value>
+    <description>The default maximum number of tasks per user, across all of
+    the user's jobs in the queue, which can be initialized concurrently. Once 
+    the user's jobs exceed this limit they will be queued on disk.  
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.default-init-accept-jobs-factor</name>
+    <value>10</value>
+    <description>The default multiple of (maximum-system-jobs * queue-capacity) 
+    used to determine the number of jobs which are accepted by the scheduler.  
+    </description>
+  </property>
+
+  <!-- Capacity scheduler Job Initialization configuration parameters -->
+  <property>
+    <name>mapred.capacity-scheduler.init-poll-interval</name>
+    <value>5000</value>
+    <description>The amount of time in milliseconds used to poll 
+    the job queues for jobs to initialize.
+    </description>
+  </property>
+  <property>
+    <name>mapred.capacity-scheduler.init-worker-threads</name>
+    <value>5</value>
+    <description>Number of worker threads used by the initialization
+    poller to initialize jobs in a set of queues. If this number equals
+    the number of job queues, each thread initializes jobs in a single
+    queue. If it is smaller, each thread is assigned a set of queues;
+    if it is greater, the number of threads used is capped at the
+    number of job queues.
+    </description>
+  </property>
+
+</configuration>
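
The minimum-user-limit-percent behaviour documented above reduces to simple
arithmetic: each user's share shrinks as more users submit jobs, but never
below the configured floor. A minimal Java sketch, assuming integer
percentages; the class and method names are illustrative, not scheduler code:

  public final class UserLimitExample {
    // Maximum percentage of a queue's resources one user may use, given
    // the number of users currently competing for the queue.
    static int userLimitPercent(int activeUsers, int minimumUserLimitPercent) {
      return Math.max(100 / activeUsers, minimumUserLimitPercent);
    }

    public static void main(String[] args) {
      int floor = 25; // minimum-user-limit-percent, as in the example above
      System.out.println(userLimitPercent(2, floor)); // 50
      System.out.println(userLimitPercent(3, floor)); // 33
      System.out.println(userLimitPercent(4, floor)); // 25
      System.out.println(userLimitPercent(8, floor)); // still 25: floor holds
    }
  }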

Added: incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml?rev=1459041&view=auto
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml (added)
+++ incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml Wed Mar 20 20:44:43 2013
@@ -0,0 +1,195 @@
+<?xml version="1.0"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- This is the configuration file for the resource manager in Hadoop. -->
+<!-- You can configure various scheduling parameters related to queues. -->
+<!-- The properties for a queue follow a naming convention, such as -->
+<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
+
+<configuration>
+
+  <property>
+    <name>mapred.capacity-scheduler.maximum-system-jobs</name>
+    <value>3000</value>
+    <description>Maximum number of jobs in the system which can be initialized
+     concurrently by the CapacityScheduler.
+    </description>    
+  </property>
+  
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.capacity</name>
+    <value>100</value>
+    <description>Percentage of the number of slots in the cluster that are
+      to be available for jobs in this queue.
+    </description>    
+  </property>
+  
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.maximum-capacity</name>
+    <value>-1</value>
+    <description>
+      maximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.
+      This provides a means to limit how much excess capacity a queue can use. By default, there is no limit.
+      The maximum-capacity of a queue can only be greater than or equal to its minimum capacity.
+      The default value of -1 implies a queue can use the complete capacity of the cluster.
+
+      This property can be used to curtail long-running jobs from occupying more than a
+      certain percentage of the cluster, which, in the absence of pre-emption, could lead
+      to the capacity guarantees of other queues being affected.
+
+      One important thing to note is that maximum-capacity is a percentage, so the absolute
+      maximum capacity changes with the cluster's capacity. If a large number of nodes or
+      racks is added to the cluster, the maximum capacity in absolute terms increases accordingly.
+    </description>
+  </property>
+  
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.supports-priority</name>
+    <value>false</value>
+    <description>If true, priorities of jobs will be taken into 
+      account in scheduling decisions.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
+    <value>100</value>
+    <description> Each queue enforces a limit on the percentage of resources 
+    allocated to a user at any given time, if there is competition for them. 
+    This user limit can vary between a minimum and maximum value. The former
+    depends on the number of users who have submitted jobs, and the latter is
+    set to this property value. For example, suppose the value of this 
+    property is 25. If two users have submitted jobs to a queue, no single 
+    user can use more than 50% of the queue resources. If a third user submits
+    a job, no single user can use more than 33% of the queue resources. With 4 
+    or more users, no user can use more than 25% of the queue's resources. A 
+    value of 100 implies no user limits are imposed. 
+    </description>
+  </property>
+  
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.user-limit-factor</name>
+    <value>1</value>
+    <description>The multiple of the queue capacity which can be configured to 
+    allow a single user to acquire more slots. 
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks</name>
+    <value>200000</value>
+    <description>The maximum number of tasks, across all jobs in the queue, 
+    which can be initialized concurrently. Once the queue's jobs exceed this 
+    limit they will be queued on disk.  
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user</name>
+    <value>100000</value>
+    <description>The maximum number of tasks per user, across all of the
+    user's jobs in the queue, which can be initialized concurrently. Once the 
+    user's jobs exceed this limit they will be queued on disk.  
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.queue.default.init-accept-jobs-factor</name>
+    <value>10</value>
+    <description>The multiple of (maximum-system-jobs * queue-capacity) used to 
+    determine the number of jobs which are accepted by the scheduler.  
+    </description>
+  </property>
+
+  <!-- The default configuration settings for the capacity task scheduler -->
+  <!-- The default values would be applied to all the queues which don't have -->
+  <!-- the appropriate property for the particular queue -->
+  <property>
+    <name>mapred.capacity-scheduler.default-supports-priority</name>
+    <value>false</value>
+    <description>If true, priorities of jobs will be taken into 
+      account in scheduling decisions by default in a job queue.
+    </description>
+  </property>
+  
+  <property>
+    <name>mapred.capacity-scheduler.default-minimum-user-limit-percent</name>
+    <value>100</value>
+    <description>The default percentage of the queue's resources to which a
+      single user is limited at any given point in time.
+    </description>
+  </property>
+
+
+  <property>
+    <name>mapred.capacity-scheduler.default-user-limit-factor</name>
+    <value>1</value>
+    <description>The default multiple of queue-capacity which is used to 
+    determine the number of slots a single user can consume concurrently.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-queue</name>
+    <value>200000</value>
+    <description>The default maximum number of tasks, across all jobs in the 
+    queue, which can be initialized concurrently. Once the queue's jobs exceed 
+    this limit they will be queued on disk.  
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-user</name>
+    <value>100000</value>
+    <description>The default maximum number of tasks per user, across all of
+    the user's jobs in the queue, which can be initialized concurrently. Once 
+    the user's jobs exceed this limit they will be queued on disk.  
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.capacity-scheduler.default-init-accept-jobs-factor</name>
+    <value>10</value>
+    <description>The default multiple of (maximum-system-jobs * queue-capacity) 
+    used to determine the number of jobs which are accepted by the scheduler.  
+    </description>
+  </property>
+
+  <!-- Capacity scheduler Job Initialization configuration parameters -->
+  <property>
+    <name>mapred.capacity-scheduler.init-poll-interval</name>
+    <value>5000</value>
+    <description>The amount of time in milliseconds used to poll 
+    the job queues for jobs to initialize.
+    </description>
+  </property>
+  <property>
+    <name>mapred.capacity-scheduler.init-worker-threads</name>
+    <value>5</value>
+    <description>Number of worker threads used by the initialization
+    poller to initialize jobs in a set of queues. If this number equals
+    the number of job queues, each thread initializes jobs in a single
+    queue. If it is smaller, each thread is assigned a set of queues;
+    if it is greater, the number of threads used is capped at the
+    number of job queues.
+    </description>
+  </property>
+
+</configuration>
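
The init-accept-jobs-factor properties above gate how many jobs a queue will
accept. A hedged Java sketch of the implied bound, taking queue capacity as a
percentage (illustrative only; the scheduler's exact rounding may differ):

  public final class AcceptJobsExample {
    // "The multiple of (maximum-system-jobs * queue-capacity)" from the
    // property description, with capacity read as a fraction of 100.
    static long acceptedJobLimit(int initAcceptJobsFactor,
                                 int maximumSystemJobs,
                                 double queueCapacityPercent) {
      return Math.round(initAcceptJobsFactor * maximumSystemJobs
          * (queueCapacityPercent / 100.0));
    }

    public static void main(String[] args) {
      // With the defaults shipped here: 10 * 3000 * 100% = 30000 jobs.
      System.out.println(acceptedJobLimit(10, 3000, 100.0));
    }
  }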

Added: incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.Check.sql
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.Check.sql?rev=1459041&view=auto
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.Check.sql (added)
+++ incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.Check.sql Wed Mar 20 20:44:43 2013
@@ -0,0 +1,20 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements.  See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership.  The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+\connect ambari;
+
+COPY (SELECT count(*) FROM ambari.serviceconfigmapping WHERE service_name = 'MAPREDUCE') TO STDOUT WITH CSV;
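
The check emits a single CSV count of MAPREDUCE rows in
ambari.serviceconfigmapping. The same probe can be run over JDBC; a minimal
sketch with assumed connection details (URL and credentials are illustrative):

  import java.sql.Connection;
  import java.sql.DriverManager;
  import java.sql.ResultSet;
  import java.sql.Statement;

  public final class UpgradeCheckExample {
    public static void main(String[] args) throws Exception {
      // Assumed local Ambari database; adjust URL and credentials.
      try (Connection c = DriverManager.getConnection(
               "jdbc:postgresql://localhost/ambari", "ambari", "password");
           Statement s = c.createStatement();
           ResultSet rs = s.executeQuery(
               "SELECT count(*) FROM ambari.serviceconfigmapping"
               + " WHERE service_name = 'MAPREDUCE'")) {
        rs.next();
        // A zero count presumably signals that the companion Fix script
        // (below) needs to run to backfill the mapping rows.
        System.out.println(rs.getLong(1));
      }
    }
  }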

Added: incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.Fix.sql
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.Fix.sql?rev=1459041&view=auto
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.Fix.sql (added)
+++ incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.Fix.sql Wed Mar 20 20:44:43 2013
@@ -0,0 +1,26 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements.  See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership.  The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+\connect ambari;
+
+INSERT INTO ambari.serviceconfigmapping (cluster_id, service_name, config_type, config_tag, timestamp)
+    SELECT cluster_id, 'MAPREDUCE', type_name, version_tag, create_timestamp from ambari.clusterconfig 
+        WHERE type_name = 'global' ORDER BY create_timestamp DESC LIMIT 1;
+
+INSERT INTO ambari.serviceconfigmapping (cluster_id, service_name, config_type, config_tag, timestamp)
+    SELECT cluster_id, 'MAPREDUCE', type_name, version_tag, create_timestamp from ambari.clusterconfig
+         WHERE type_name = 'mapred-site' ORDER BY create_timestamp DESC LIMIT 1;

Added: incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.sql
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.sql?rev=1459041&view=auto
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.sql (added)
+++ incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.2.2.sql Wed Mar 20 20:44:43 2013
@@ -0,0 +1,21 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements.  See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership.  The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+\connect ambari;
+
+ALTER TABLE ambari.hosts
+  ALTER COLUMN disks_info TYPE VARCHAR(10000);

Added: incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/upgrade/dml/Ambari-DML-Postgres-UPGRADE_STACK.sql
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/upgrade/dml/Ambari-DML-Postgres-UPGRADE_STACK.sql?rev=1459041&view=auto
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/upgrade/dml/Ambari-DML-Postgres-UPGRADE_STACK.sql (added)
+++ incubator/ambari/branches/branch-1.2/ambari-server/src/main/resources/upgrade/dml/Ambari-DML-Postgres-UPGRADE_STACK.sql Wed Mar 20 20:44:43 2013
@@ -0,0 +1,44 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements.  See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership.  The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+\connect ambari;
+
+PREPARE clusters (text, text) AS
+  UPDATE ambari.clusters
+    SET desired_stack_version = '{"stackName":"' || $1 || '","stackVersion":"' || $2 || '"}';
+
+PREPARE hostcomponentdesiredstate (text, text) AS
+  UPDATE ambari.hostcomponentdesiredstate
+    SET desired_stack_version = '{"stackName":"' || $1 || '","stackVersion":"' || $2 || '"}';
+
+PREPARE hostcomponentstate (text, text) AS
+  UPDATE ambari.hostcomponentstate
+    SET current_stack_version = '{"stackName":"' || $1 || '","stackVersion":"' || $2 || '"}';
+
+PREPARE servicecomponentdesiredstate (text, text) AS
+  UPDATE ambari.servicecomponentdesiredstate
+    SET desired_stack_version = '{"stackName":"' || $1  || '","stackVersion":"' || $2 || '"}';
+
+PREPARE servicedesiredstate (text, text) AS
+  UPDATE ambari.servicedesiredstate
+    SET desired_stack_version = '{"stackName":"' || $1 || '","stackVersion":"' || $2 || '"}';
+
+EXECUTE clusters(:stack_name, :stack_version);
+EXECUTE hostcomponentdesiredstate(:stack_name, :stack_version);
+EXECUTE hostcomponentstate(:stack_name, :stack_version);
+EXECUTE servicecomponentdesiredstate(:stack_name, :stack_version);
+EXECUTE servicedesiredstate(:stack_name, :stack_version);
\ No newline at end of file
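
The :stack_name and :stack_version placeholders rely on psql variable
substitution (for example, psql -v stack_name="'HDP'" -v stack_version="'1.2.0'").
All five prepared statements build the same JSON literal by string
concatenation; a minimal Java sketch of the equivalent value (illustrative,
not Ambari's own serialization):

  public final class StackVersionJsonExample {
    static String desiredStackVersion(String stackName, String stackVersion) {
      return String.format(
          "{\"stackName\":\"%s\",\"stackVersion\":\"%s\"}",
          stackName, stackVersion);
    }

    public static void main(String[] args) {
      // Matches what EXECUTE clusters('HDP', '1.2.0') would write.
      System.out.println(desiredStackVersion("HDP", "1.2.0"));
      // => {"stackName":"HDP","stackVersion":"1.2.0"}
    }
  }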

Added: incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/DummyHeartbeatConstants.java
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/DummyHeartbeatConstants.java?rev=1459041&view=auto
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/DummyHeartbeatConstants.java (added)
+++ incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/DummyHeartbeatConstants.java Wed Mar 20 20:44:43 2013
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.agent;
+
+import org.apache.ambari.server.Role;
+
+public interface DummyHeartbeatConstants {
+
+  String DummyCluster = "cluster1";
+  String DummyHostname1 = "host1";
+  String DummyOs = "CentOS";
+  String DummyOsType = "centos5";
+  String DummyOSRelease = "5.8";
+
+  String DummyHostStatus = "I am ok";
+
+  String DummyStackId = "HDP-0.1";
+
+  String HDFS = "HDFS";
+  String HBASE = "HBASE";
+
+  String DATANODE = Role.DATANODE.name();
+  String NAMENODE = Role.NAMENODE.name();
+  String SECONDARY_NAMENODE = Role.SECONDARY_NAMENODE.name();
+  String HBASE_MASTER = Role.HBASE_MASTER.name();
+
+}

Modified: incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java (original)
+++ incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java Wed Mar 20 20:44:43 2013
@@ -17,22 +17,77 @@
  */
 package org.apache.ambari.server.agent;
 
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DATANODE;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyCluster;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyHostStatus;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyHostname1;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyOSRelease;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyOs;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyOsType;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyStackId;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HBASE;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HBASE_MASTER;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HDFS;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.NAMENODE;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.SECONDARY_NAMENODE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import javax.xml.bind.JAXBException;
+
 import junit.framework.Assert;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
-import org.apache.ambari.server.actionmanager.*;
+import org.apache.ambari.server.actionmanager.ActionDBAccessor;
+import org.apache.ambari.server.actionmanager.ActionDBAccessorImpl;
+import org.apache.ambari.server.actionmanager.ActionDBInMemoryImpl;
+import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.actionmanager.HostRoleStatus;
+import org.apache.ambari.server.actionmanager.Stage;
+import org.apache.ambari.server.agent.ActionQueue;
+import org.apache.ambari.server.agent.CommandReport;
+import org.apache.ambari.server.agent.ComponentStatus;
+import org.apache.ambari.server.agent.ExecutionCommand;
+import org.apache.ambari.server.agent.HeartBeat;
+import org.apache.ambari.server.agent.HeartBeatHandler;
+import org.apache.ambari.server.agent.HeartBeatResponse;
+import org.apache.ambari.server.agent.HeartbeatMonitor;
+import org.apache.ambari.server.agent.HostInfo;
+import org.apache.ambari.server.agent.HostStatus;
 import org.apache.ambari.server.agent.HostStatus.Status;
+import org.apache.ambari.server.agent.Register;
+import org.apache.ambari.server.agent.RegistrationResponse;
+import org.apache.ambari.server.agent.RegistrationStatus;
+import org.apache.ambari.server.agent.StatusCommand;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.HostsMap;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.state.*;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.HostState;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent;
 import org.apache.ambari.server.utils.StageUtils;
@@ -43,14 +98,10 @@ import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import javax.xml.bind.JAXBException;
-import java.io.IOException;
-import java.util.*;
-
-import static org.junit.Assert.*;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
+import com.google.inject.Guice;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.persist.PersistService;
 
 public class TestHeartbeatHandler {
 
@@ -85,22 +136,21 @@ public class TestHeartbeatHandler {
     ActionManager am = new ActionManager(0, 0, null, null,
         new ActionDBInMemoryImpl(), new HostsMap((String) null));
     Clusters fsm = clusters;
-    String hostname = "host1";
-    fsm.addHost(hostname);
-    Host hostObject = clusters.getHost(hostname);
+    fsm.addHost(DummyHostname1);
+    Host hostObject = clusters.getHost(DummyHostname1);
     hostObject.setIPv4("ipv4");
     hostObject.setIPv6("ipv6");
-    hostObject.setOsType("centos5");
+    hostObject.setOsType(DummyOsType);
 
     ActionQueue aq = new ActionQueue();
 
     HeartBeatHandler handler = new HeartBeatHandler(fsm, aq, am, injector);
     Register reg = new Register();
     HostInfo hi = new HostInfo();
-    hi.setHostName("host1");
-    hi.setOS("CentOS");
-    hi.setOSRelease("5.8");
-    reg.setHostname(hostname);
+    hi.setHostName(DummyHostname1);
+    hi.setOS(DummyOs);
+    hi.setOSRelease(DummyOSRelease);
+    reg.setHostname(DummyHostname1);
     reg.setHardwareProfile(hi);
     handler.handleRegistration(reg);
 
@@ -108,251 +158,228 @@ public class TestHeartbeatHandler {
 
     ExecutionCommand execCmd = new ExecutionCommand();
     execCmd.setCommandId("2-34");
-    execCmd.setHostname(hostname);
-    aq.enqueue(hostname, new ExecutionCommand());
+    execCmd.setHostname(DummyHostname1);
+    aq.enqueue(DummyHostname1, new ExecutionCommand());
     HeartBeat hb = new HeartBeat();
     hb.setResponseId(0);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, "I am ok"));
-    hb.setHostname(hostname);
+    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
+    hb.setHostname(DummyHostname1);
 
     handler.handleHeartBeat(hb);
     assertEquals(HostState.HEALTHY, hostObject.getState());
-    assertEquals(0, aq.dequeueAll(hostname).size());
+    assertEquals(0, aq.dequeueAll(DummyHostname1).size());
   }
 
   @Test
   public void testStatusHeartbeat() throws Exception {
     ActionManager am = new ActionManager(0, 0, null, null,
             new ActionDBInMemoryImpl(), new HostsMap((String) null));
-    final String hostname = "host1";
-    String clusterName = "cluster1";
-    String serviceName = "HDFS";
-    String componentName1 = "DATANODE";
-    String componentName2 = "NAMENODE";
-    //injector.injectMembers(this);
-
-    clusters.addHost(hostname);
-    clusters.getHost(hostname).setOsType("centos5");
-    clusters.getHost(hostname).persist();
-//    Host hostObject = clusters.getHost(hostname);
-//    hostObject.setIPv4("ipv4");
-//    hostObject.setIPv6("ipv6");
-//    hostObject.setOsType("centos5");
-//    hostObject.persist();
-    clusters.addCluster(clusterName);
 
-    Cluster cluster = clusters.getCluster(clusterName);
-    cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
+    clusters.addHost(DummyHostname1);
+    clusters.getHost(DummyHostname1).setOsType(DummyOsType);
+    clusters.getHost(DummyHostname1).persist();
+    clusters.addCluster(DummyCluster);
+
+    Cluster cluster = clusters.getCluster(DummyCluster);
+    cluster.setDesiredStackVersion(new StackId(DummyStackId));
 
     @SuppressWarnings("serial")
     Set<String> hostNames = new HashSet<String>(){{
-      add(hostname);
+      add(DummyHostname1);
     }};
-    clusters.mapHostsToCluster(hostNames, clusterName);
-    Service hdfs = cluster.addService(serviceName);
+    clusters.mapHostsToCluster(hostNames, DummyCluster);
+    Service hdfs = cluster.addService(HDFS);
     hdfs.persist();
-    hdfs.addServiceComponent(Role.DATANODE.name()).persist();
-    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname).persist();
-    hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
-    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname).persist();
-    hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()).persist();
-    hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname).persist();
+    hdfs.addServiceComponent(DATANODE).persist();
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(NAMENODE).persist();
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
+    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
 
     ActionQueue aq = new ActionQueue();
     HeartBeatHandler handler = new HeartBeatHandler(clusters, aq, am, injector);
 
     Register reg = new Register();
     HostInfo hi = new HostInfo();
-    hi.setHostName("host1");
-    hi.setOS("CentOS");
-    hi.setOSRelease("5.8");
-    reg.setHostname(hostname);
+    hi.setHostName(DummyHostname1);
+    hi.setOS(DummyOs);
+    hi.setOSRelease(DummyOSRelease);
+    reg.setHostname(DummyHostname1);
     reg.setResponseId(0);
     reg.setHardwareProfile(hi);
     handler.handleRegistration(reg);
 
-    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(clusterName).getService(serviceName).
-            getServiceComponent(componentName1).getServiceComponentHost(hostname);
-    ServiceComponentHost serviceComponentHost2 = clusters.getCluster(clusterName).getService(serviceName).
-            getServiceComponent(componentName2).getServiceComponentHost(hostname);
+    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
+            getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
+    ServiceComponentHost serviceComponentHost2 = clusters.getCluster(DummyCluster).getService(HDFS).
+            getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1);
+    ServiceComponentHost serviceComponentHost3 = clusters.getCluster(DummyCluster).getService(HDFS).
+        getServiceComponent(SECONDARY_NAMENODE).getServiceComponentHost(DummyHostname1);
     serviceComponentHost1.setState(State.INSTALLED);
     serviceComponentHost2.setState(State.INSTALLED);
-
+    serviceComponentHost3.setState(State.STARTING);
 
     HeartBeat hb = new HeartBeat();
     hb.setTimestamp(System.currentTimeMillis());
     hb.setResponseId(0);
-    hb.setHostname(hostname);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, "I am ok"));
+    hb.setHostname(DummyHostname1);
+    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
     hb.setReports(new ArrayList<CommandReport>());
     ArrayList<ComponentStatus> componentStatuses = new ArrayList<ComponentStatus>();
     ComponentStatus componentStatus1 = new ComponentStatus();
-    componentStatus1.setClusterName(clusterName);
-    componentStatus1.setServiceName(serviceName);
-    componentStatus1.setMessage("I am ok");
+    componentStatus1.setClusterName(DummyCluster);
+    componentStatus1.setServiceName(HDFS);
+    componentStatus1.setMessage(DummyHostStatus);
     componentStatus1.setStatus(State.STARTED.name());
-    componentStatus1.setComponentName(componentName1);
+    componentStatus1.setComponentName(DATANODE);
     componentStatuses.add(componentStatus1);
+    ComponentStatus componentStatus2 = new ComponentStatus();
+    componentStatus2.setClusterName(DummyCluster);
+    componentStatus2.setServiceName(HDFS);
+    componentStatus2.setMessage(DummyHostStatus);
+    componentStatus2.setStatus(State.STARTED.name());
+    componentStatus2.setComponentName(SECONDARY_NAMENODE);
+    componentStatuses.add(componentStatus2);
     hb.setComponentStatus(componentStatuses);
 
     handler.handleHeartBeat(hb);
     State componentState1 = serviceComponentHost1.getState();
     State componentState2 = serviceComponentHost2.getState();
+    State componentState3 = serviceComponentHost3.getState();
     assertEquals(State.STARTED, componentState1);
     assertEquals(State.INSTALLED, componentState2);
+    assertEquals(State.STARTED, componentState3);
   }
 
   @Test
-  public void testStartFailedStatusHeartbeat() throws Exception {
+  public void testLiveStatusUpdateAfterStopFailed() throws Exception {
     ActionManager am = new ActionManager(0, 0, null, null,
             new ActionDBInMemoryImpl(), new HostsMap((String) null));
-    final String hostname = "host1";
-    String clusterName = "cluster1";
-    String serviceName = "HDFS";
-    String componentName1 = "DATANODE";
-    String componentName2 = "NAMENODE";
-    String componentName3 = "SECONDARY_NAMENODE";
-    //injector.injectMembers(this);
-
-    clusters.addHost(hostname);
-    clusters.getHost(hostname).setOsType("centos5");
-    clusters.getHost(hostname).persist();
-//    Host hostObject = clusters.getHost(hostname);
-//    hostObject.setIPv4("ipv4");
-//    hostObject.setIPv6("ipv6");
-//    hostObject.setOsType("centos5");
-//    hostObject.persist();
-    clusters.addCluster(clusterName);
+    clusters.addHost(DummyHostname1);
+    clusters.getHost(DummyHostname1).setOsType(DummyOsType);
+    clusters.getHost(DummyHostname1).persist();
+    clusters.addCluster(DummyCluster);
 
-    Cluster cluster = clusters.getCluster(clusterName);
-    cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
+    Cluster cluster = clusters.getCluster(DummyCluster);
+    cluster.setDesiredStackVersion(new StackId(DummyStackId));
 
     @SuppressWarnings("serial")
     Set<String> hostNames = new HashSet<String>(){{
-      add(hostname);
+      add(DummyHostname1);
     }};
-    clusters.mapHostsToCluster(hostNames, clusterName);
-    Service hdfs = cluster.addService(serviceName);
+    clusters.mapHostsToCluster(hostNames, DummyCluster);
+    Service hdfs = cluster.addService(HDFS);
     hdfs.persist();
-    hdfs.addServiceComponent(Role.DATANODE.name()).persist();
-    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname).persist();
-    hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
-    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname).persist();
-    hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()).persist();
-    hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname).persist();
+    hdfs.addServiceComponent(DATANODE).persist();
+    hdfs.getServiceComponent(DATANODE).
+            addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(NAMENODE).persist();
+    hdfs.getServiceComponent(NAMENODE).
+            addServiceComponentHost(DummyHostname1).persist();
 
     ActionQueue aq = new ActionQueue();
     HeartBeatHandler handler = new HeartBeatHandler(clusters, aq, am, injector);
 
     Register reg = new Register();
     HostInfo hi = new HostInfo();
-    hi.setHostName("host1");
-    hi.setOS("CentOS");
-    hi.setOSRelease("5.8");
-    reg.setHostname(hostname);
+    hi.setHostName(DummyHostname1);
+    hi.setOS(DummyOs);
+    hi.setOSRelease(DummyOSRelease);
+    reg.setHostname(DummyHostname1);
     reg.setResponseId(0);
     reg.setHardwareProfile(hi);
     handler.handleRegistration(reg);
 
-    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(clusterName).getService(serviceName).
-            getServiceComponent(componentName1).getServiceComponentHost(hostname);
-    ServiceComponentHost serviceComponentHost2 = clusters.getCluster(clusterName).getService(serviceName).
-            getServiceComponent(componentName2).getServiceComponentHost(hostname);
-    ServiceComponentHost serviceComponentHost3 = clusters.getCluster(clusterName).getService(serviceName).
-            getServiceComponent(componentName3).getServiceComponentHost(hostname);
-    serviceComponentHost1.setState(State.INSTALLED);
-    serviceComponentHost2.setState(State.START_FAILED);
-    serviceComponentHost3.setState(State.STARTED);
+    ServiceComponentHost serviceComponentHost1 = clusters.
+            getCluster(DummyCluster).getService(HDFS).
+            getServiceComponent(DATANODE).
+            getServiceComponentHost(DummyHostname1);
+    ServiceComponentHost serviceComponentHost2 = clusters.
+            getCluster(DummyCluster).getService(HDFS).
+            getServiceComponent(NAMENODE).
+            getServiceComponentHost(DummyHostname1);
+    serviceComponentHost1.setState(State.STOP_FAILED);
+    serviceComponentHost2.setState(State.STOP_FAILED);
 
     HeartBeat hb = new HeartBeat();
     hb.setTimestamp(System.currentTimeMillis());
     hb.setResponseId(0);
-    hb.setHostname(hostname);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, "I am ok"));
+    hb.setHostname(DummyHostname1);
+    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
     hb.setReports(new ArrayList<CommandReport>());
     ArrayList<ComponentStatus> componentStatuses = new ArrayList<ComponentStatus>();
+
     ComponentStatus componentStatus1 = new ComponentStatus();
-    componentStatus1.setClusterName(clusterName);
-    componentStatus1.setServiceName(serviceName);
-    componentStatus1.setMessage("I am ok");
-    componentStatus1.setStatus(State.START_FAILED.name());
-    componentStatus1.setComponentName(componentName1);
+    componentStatus1.setClusterName(DummyCluster);
+    componentStatus1.setServiceName(HDFS);
+    componentStatus1.setMessage(DummyHostStatus);
+    componentStatus1.setStatus(State.STARTED.name());
+    componentStatus1.setComponentName(DATANODE);
     componentStatuses.add(componentStatus1);
 
     ComponentStatus componentStatus2 = new ComponentStatus();
-    componentStatus2.setClusterName(clusterName);
-    componentStatus2.setServiceName(serviceName);
-    componentStatus2.setMessage("I am ok");
+    componentStatus2.setClusterName(DummyCluster);
+    componentStatus2.setServiceName(HDFS);
+    componentStatus2.setMessage(DummyHostStatus);
     componentStatus2.setStatus(State.INSTALLED.name());
-    componentStatus2.setComponentName(componentName2);
+    componentStatus2.setComponentName(NAMENODE);
     componentStatuses.add(componentStatus2);
 
-    ComponentStatus componentStatus3 = new ComponentStatus();
-    componentStatus3.setClusterName(clusterName);
-    componentStatus3.setServiceName(serviceName);
-    componentStatus3.setMessage("I am ok");
-    componentStatus3.setStatus(State.INSTALLED.name());
-    componentStatus3.setComponentName(componentName3);
-    componentStatuses.add(componentStatus3);
-
     hb.setComponentStatus(componentStatuses);
 
     handler.handleHeartBeat(hb);
     State componentState1 = serviceComponentHost1.getState();
     State componentState2 = serviceComponentHost2.getState();
-    State componentState3 = serviceComponentHost3.getState();
-    assertEquals(State.START_FAILED, componentState1);
-    assertEquals(State.START_FAILED, componentState2);
-    assertEquals(State.INSTALLED, componentState3);
+    assertEquals(State.STARTED, componentState1);
+    assertEquals(State.INSTALLED, componentState2);
   }
 
   @Test
   public void testCommandReport() throws AmbariException {
-    String hostname = "host1";
-    String clusterName = "cluster1";
     injector.injectMembers(this);
-    clusters.addHost(hostname);
-    clusters.getHost(hostname).persist();
-    clusters.addCluster(clusterName);
+    clusters.addHost(DummyHostname1);
+    clusters.getHost(DummyHostname1).persist();
+    clusters.addCluster(DummyCluster);
     ActionDBAccessor db = injector.getInstance(ActionDBAccessorImpl.class);
     ActionManager am = new ActionManager(5000, 1200000, new ActionQueue(), clusters, db,
         new HostsMap((String) null));
-    populateActionDB(db, hostname);
+    populateActionDB(db, DummyHostname1);
     Stage stage = db.getAllStages(requestId).get(0);
     Assert.assertEquals(stageId, stage.getStageId());
-    stage.setHostRoleStatus(hostname, "HBASE_MASTER", HostRoleStatus.QUEUED);
-    db.hostRoleScheduled(stage, hostname, "HBASE_MASTER");
+    stage.setHostRoleStatus(DummyHostname1, HBASE_MASTER, HostRoleStatus.QUEUED);
+    db.hostRoleScheduled(stage, DummyHostname1, HBASE_MASTER);
     List<CommandReport> reports = new ArrayList<CommandReport>();
     CommandReport cr = new CommandReport();
     cr.setActionId(StageUtils.getActionId(requestId, stageId));
     cr.setTaskId(1);
-    cr.setRole("HBASE_MASTER");
+    cr.setRole(HBASE_MASTER);
     cr.setStatus("COMPLETED");
     cr.setStdErr("");
     cr.setStdOut("");
     cr.setExitCode(215);
     reports.add(cr);
-    am.processTaskResponse(hostname, reports);
+    am.processTaskResponse(DummyHostname1, reports);
     assertEquals(215,
-            am.getAction(requestId, stageId).getExitCode(hostname, "HBASE_MASTER"));
+            am.getAction(requestId, stageId).getExitCode(DummyHostname1, HBASE_MASTER));
     assertEquals(HostRoleStatus.COMPLETED, am.getAction(requestId, stageId)
-            .getHostRoleStatus(hostname, "HBASE_MASTER"));
+            .getHostRoleStatus(DummyHostname1, HBASE_MASTER));
     Stage s = db.getAllStages(requestId).get(0);
     assertEquals(HostRoleStatus.COMPLETED,
-            s.getHostRoleStatus(hostname, "HBASE_MASTER"));
+            s.getHostRoleStatus(DummyHostname1, HBASE_MASTER));
     assertEquals(215,
-            s.getExitCode(hostname, "HBASE_MASTER"));
+            s.getExitCode(DummyHostname1, HBASE_MASTER));
   }
 
-  private void populateActionDB(ActionDBAccessor db, String hostname) {
-    Stage s = new Stage(requestId, "/a/b", "cluster1");
+  private void populateActionDB(ActionDBAccessor db, String DummyHostname1) {
+    Stage s = new Stage(requestId, "/a/b", DummyCluster);
     s.setStageId(stageId);
     String filename = null;
-    s.addHostRoleExecutionCommand(hostname, Role.HBASE_MASTER,
+    s.addHostRoleExecutionCommand(DummyHostname1, Role.HBASE_MASTER,
         RoleCommand.START,
         new ServiceComponentHostStartEvent(Role.HBASE_MASTER.toString(),
-            hostname, System.currentTimeMillis(),
-            new HashMap<String, String>()), "cluster1", "HBASE");
+            DummyHostname1, System.currentTimeMillis(),
+            new HashMap<String, String>()), DummyCluster, HBASE);
     List<Stage> stages = new ArrayList<Stage>();
     stages.add(s);
     db.persistActions(stages);
@@ -364,23 +391,22 @@ public class TestHeartbeatHandler {
     ActionManager am = new ActionManager(0, 0, null, null,
         new ActionDBInMemoryImpl(), new HostsMap((String) null));
     Clusters fsm = clusters;
-    String hostname = "host1";
     HeartBeatHandler handler = new HeartBeatHandler(fsm, new ActionQueue(), am,
         injector);
-    clusters.addHost(hostname);
-    Host hostObject = clusters.getHost(hostname);
+    clusters.addHost(DummyHostname1);
+    Host hostObject = clusters.getHost(DummyHostname1);
     hostObject.setIPv4("ipv4");
     hostObject.setIPv6("ipv6");
 
     Register reg = new Register();
     HostInfo hi = new HostInfo();
-    hi.setHostName("host1");
-    hi.setOS("centos5");
-    reg.setHostname(hostname);
+    hi.setHostName(DummyHostname1);
+    hi.setOS(DummyOsType);
+    reg.setHostname(DummyHostname1);
     reg.setHardwareProfile(hi);
     handler.handleRegistration(reg);
     assertEquals(hostObject.getState(), HostState.HEALTHY);
-    assertEquals("centos5", hostObject.getOsType());
+    assertEquals(DummyOsType, hostObject.getOsType());
     assertTrue(hostObject.getLastRegistrationTime() != 0);
     assertEquals(hostObject.getLastHeartbeatTime(),
         hostObject.getLastRegistrationTime());
@@ -391,29 +417,28 @@ public class TestHeartbeatHandler {
     ActionManager am = new ActionManager(0, 0, null, null,
         new ActionDBInMemoryImpl(), new HostsMap((String) null));
     Clusters fsm = clusters;
-    String hostname = "host1";
     HeartBeatHandler handler = new HeartBeatHandler(fsm, new ActionQueue(), am,
         injector);
-    clusters.addHost(hostname);
-    Host hostObject = clusters.getHost(hostname);
+    clusters.addHost(DummyHostname1);
+    Host hostObject = clusters.getHost(DummyHostname1);
     hostObject.setIPv4("ipv4");
     hostObject.setIPv6("ipv6");
 
     Register reg = new Register();
     HostInfo hi = new HostInfo();
-    hi.setHostName("host1");
-    hi.setOS("centos5");
-    reg.setHostname(hostname);
+    hi.setHostName(DummyHostname1);
+    hi.setOS(DummyOsType);
+    reg.setHostname(DummyHostname1);
     reg.setHardwareProfile(hi);
-    reg.setPublicHostname(hostname + "-public");
+    reg.setPublicHostname(DummyHostname1 + "-public");
     handler.handleRegistration(reg);
     assertEquals(hostObject.getState(), HostState.HEALTHY);
-    assertEquals("centos5", hostObject.getOsType());
+    assertEquals(DummyOsType, hostObject.getOsType());
     assertTrue(hostObject.getLastRegistrationTime() != 0);
     assertEquals(hostObject.getLastHeartbeatTime(),
         hostObject.getLastRegistrationTime());
     
-    Host verifyHost = clusters.getHost(hostname);
+    Host verifyHost = clusters.getHost(DummyHostname1);
     assertEquals(verifyHost.getPublicHostName(), reg.getPublicHostname());
   }
   
@@ -424,19 +449,18 @@ public class TestHeartbeatHandler {
     ActionManager am = new ActionManager(0, 0, null, null,
         new ActionDBInMemoryImpl(), new HostsMap((String) null));
     Clusters fsm = clusters;
-    String hostname = "host1";
     HeartBeatHandler handler = new HeartBeatHandler(fsm, new ActionQueue(), am,
         injector);
-    clusters.addHost(hostname);
-    Host hostObject = clusters.getHost(hostname);
+    clusters.addHost(DummyHostname1);
+    Host hostObject = clusters.getHost(DummyHostname1);
     hostObject.setIPv4("ipv4");
     hostObject.setIPv6("ipv6");
 
     Register reg = new Register();
     HostInfo hi = new HostInfo();
-    hi.setHostName("host1");
+    hi.setHostName(DummyHostname1);
     hi.setOS("MegaOperatingSystem");
-    reg.setHostname(hostname);
+    reg.setHostname(DummyHostname1);
     reg.setHardwareProfile(hi);
     try {
       handler.handleRegistration(reg);
@@ -453,9 +477,8 @@ public class TestHeartbeatHandler {
     ActionManager am = new ActionManager(0, 0, null, null,
         new ActionDBInMemoryImpl(), new HostsMap((String) null));
     Clusters fsm = clusters;
-    String hostname = "host1";
-    fsm.addHost(hostname);
-    Host hostObject = clusters.getHost(hostname);
+    fsm.addHost(DummyHostname1);
+    Host hostObject = clusters.getHost(DummyHostname1);
     hostObject.setIPv4("ipv4");
     hostObject.setIPv6("ipv6");
 
@@ -463,9 +486,9 @@ public class TestHeartbeatHandler {
         injector);
     Register reg = new Register();
     HostInfo hi = new HostInfo();
-    hi.setHostName("host1");
+    hi.setHostName(DummyHostname1);
     hi.setOS("redhat5");
-    reg.setHostname(hostname);
+    reg.setHostname(DummyHostname1);
     reg.setHardwareProfile(hi);
     RegistrationResponse response = handler.handleRegistration(reg);
 
@@ -487,7 +510,7 @@ public class TestHeartbeatHandler {
     register.setTimestamp(new Date().getTime());
     register.setResponseId(123);
     HostInfo hi = new HostInfo();
-    hi.setHostName("host1");
+    hi.setHostName(DummyHostname1);
     hi.setOS("redhat5");
     register.setHardwareProfile(hi);
     RegistrationResponse registrationResponse = heartBeatHandler.handleRegistration(register);
@@ -534,8 +557,8 @@ public class TestHeartbeatHandler {
   public void testStateCommandsAtRegistration() throws AmbariException, InvalidStateTransitionException {
     List<StatusCommand> dummyCmds = new ArrayList<StatusCommand>();
     StatusCommand statusCmd1 = new StatusCommand();
-    statusCmd1.setClusterName("Cluster");
-    statusCmd1.setServiceName("HDFS");
+    statusCmd1.setClusterName(DummyCluster);
+    statusCmd1.setServiceName(HDFS);
     dummyCmds.add(statusCmd1);
     HeartbeatMonitor hm = mock(HeartbeatMonitor.class);
     when(hm.generateStatusCommands(anyString())).thenReturn(dummyCmds);
@@ -543,21 +566,20 @@ public class TestHeartbeatHandler {
     ActionManager am = new ActionManager(0, 0, null, null,
             new ActionDBInMemoryImpl(), new HostsMap((String) null));
     Clusters fsm = clusters;
-    String hostname = "host1";
     ActionQueue actionQueue = new ActionQueue();
     HeartBeatHandler handler = new HeartBeatHandler(fsm, actionQueue, am,
         injector);
     handler.setHeartbeatMonitor(hm);
-    clusters.addHost(hostname);
-    Host hostObject = clusters.getHost(hostname);
+    clusters.addHost(DummyHostname1);
+    Host hostObject = clusters.getHost(DummyHostname1);
     hostObject.setIPv4("ipv4");
     hostObject.setIPv6("ipv6");
 
     Register reg = new Register();
     HostInfo hi = new HostInfo();
-    hi.setHostName("host1");
-    hi.setOS("redhat5");
-    reg.setHostname(hostname);
+    hi.setHostName(DummyHostname1);
+    hi.setOS(DummyOsType);
+    reg.setHostname(DummyHostname1);
     reg.setHardwareProfile(hi);
     RegistrationResponse registrationResponse = handler.handleRegistration(reg);
     registrationResponse.getStatusCommands();
@@ -569,67 +591,59 @@ public class TestHeartbeatHandler {
   public void testTaskInProgressHandling() throws AmbariException, InvalidStateTransitionException {
     ActionManager am = new ActionManager(0, 0, null, null,
             new ActionDBInMemoryImpl(), new HostsMap((String) null));
-    final String hostname = "host1";
-    String clusterName = "cluster1";
-    String serviceName = "HDFS";
-    String componentName1 = "DATANODE";
-    String componentName2 = "NAMENODE";
-
-    clusters.addHost(hostname);
-    clusters.getHost(hostname).setOsType("centos5");
-    clusters.getHost(hostname).persist();
-    clusters.addCluster(clusterName);
+    clusters.addHost(DummyHostname1);
+    clusters.getHost(DummyHostname1).setOsType(DummyOsType);
+    clusters.getHost(DummyHostname1).persist();
+    clusters.addCluster(DummyCluster);
 
-    Cluster cluster = clusters.getCluster(clusterName);
-    cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
+    Cluster cluster = clusters.getCluster(DummyCluster);
+    cluster.setDesiredStackVersion(new StackId(DummyStackId));
 
     @SuppressWarnings("serial")
     Set<String> hostNames = new HashSet<String>(){{
-      add(hostname);
+      add(DummyHostname1);
     }};
-    clusters.mapHostsToCluster(hostNames, clusterName);
-    Service hdfs = cluster.addService(serviceName);
+    clusters.mapHostsToCluster(hostNames, DummyCluster);
+    Service hdfs = cluster.addService(HDFS);
     hdfs.persist();
-    hdfs.addServiceComponent(Role.DATANODE.name()).persist();
-    hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname).persist();
-    hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
-    hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost(hostname).persist();
-    hdfs.addServiceComponent(Role.SECONDARY_NAMENODE.name()).persist();
-    hdfs.getServiceComponent(Role.SECONDARY_NAMENODE.name()).addServiceComponentHost(hostname).persist();
+    hdfs.addServiceComponent(DATANODE).persist();
+    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(NAMENODE).persist();
+    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1).persist();
+    hdfs.addServiceComponent(SECONDARY_NAMENODE).persist();
+    hdfs.getServiceComponent(SECONDARY_NAMENODE).addServiceComponentHost(DummyHostname1).persist();
 
     ActionQueue aq = new ActionQueue();
     HeartBeatHandler handler = new HeartBeatHandler(clusters, aq, am, injector);
 
     Register reg = new Register();
     HostInfo hi = new HostInfo();
-    hi.setHostName("host1");
-    hi.setOS("CentOS");
-    hi.setOSRelease("5.8");
-    reg.setHostname(hostname);
+    hi.setHostName(DummyHostname1);
+    hi.setOS(DummyOs);
+    hi.setOSRelease(DummyOSRelease);
+    reg.setHostname(DummyHostname1);
     reg.setResponseId(0);
     reg.setHardwareProfile(hi);
     handler.handleRegistration(reg);
 
-    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(clusterName).getService(serviceName).
-            getServiceComponent(componentName1).getServiceComponentHost(hostname);
-    ServiceComponentHost serviceComponentHost2 = clusters.getCluster(clusterName).getService(serviceName).
-            getServiceComponent(componentName2).getServiceComponentHost(hostname);
+    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
+            getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
     serviceComponentHost1.setState(State.INSTALLING);
 
 
     HeartBeat hb = new HeartBeat();
     hb.setTimestamp(System.currentTimeMillis());
     hb.setResponseId(0);
-    hb.setHostname(hostname);
-    hb.setNodeStatus(new HostStatus(Status.HEALTHY, "I am ok"));
+    hb.setHostname(DummyHostname1);
+    hb.setNodeStatus(new HostStatus(Status.HEALTHY, DummyHostStatus));
 
     List<CommandReport> reports = new ArrayList<CommandReport>();
     CommandReport cr = new CommandReport();
     cr.setActionId(StageUtils.getActionId(requestId, stageId));
     cr.setTaskId(1);
-    cr.setClusterName(clusterName);
-    cr.setServiceName(serviceName);
-    cr.setRole(componentName1);
+    cr.setClusterName(DummyCluster);
+    cr.setServiceName(HDFS);
+    cr.setRole(DATANODE);
     cr.setStatus("IN_PROGRESS");
     cr.setStdErr("none");
     cr.setStdOut("dummy output");
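[The hunks above replace per-test string literals ("host1", "centos5", "cluster1", "HDP-0.1", ...) with shared Dummy* constants. Below is a minimal sketch of the constants holder these tests presumably statically import; the interface name and package are assumptions inferred from the identifiers in the diff, while the values come directly from the literals being replaced.]

    package org.apache.ambari.server.agent;

    import org.apache.ambari.server.Role;

    // Hypothetical shared-constants holder; name and location are assumed,
    // not shown in this commit. Values mirror the literals removed above.
    public interface DummyHeartbeatConstants {
      String DummyCluster = "cluster1";
      String DummyHostname1 = "host1";
      String DummyOs = "CentOS";          // hi.setOS(DummyOs)
      String DummyOsType = "centos5";     // host.setOsType(DummyOsType)
      String DummyOSRelease = "5.8";
      String DummyStackId = "HDP-0.1";
      String DummyHostStatus = "I am ok";
      String HDFS = "HDFS";
      String DATANODE = Role.DATANODE.name();
      String NAMENODE = Role.NAMENODE.name();
      String SECONDARY_NAMENODE = Role.SECONDARY_NAMENODE.name();
    }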

Modified: incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/TestSuite.java
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/TestSuite.java?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/TestSuite.java (original)
+++ incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/TestSuite.java Wed Mar 20 20:44:43 2013
@@ -19,10 +19,13 @@ package org.apache.ambari.server.api;
  */
 
 /**
- * All unit tests.
+ * All API unit tests.
  */
 
 import org.apache.ambari.server.api.handlers.*;
+import org.apache.ambari.server.api.predicate.QueryLexerTest;
+import org.apache.ambari.server.api.predicate.QueryParserTest;
+import org.apache.ambari.server.api.predicate.operators.*;
 import org.apache.ambari.server.api.query.QueryImplTest;
 import org.apache.ambari.server.api.resources.ResourceInstanceImplTest;
 import org.apache.ambari.server.api.services.*;
@@ -36,6 +39,10 @@ import org.junit.runners.Suite;
     ComponentServiceTest.class, HostComponentServiceTest.class, ReadHandlerTest.class, QueryImplTest.class,
     JsonPropertyParserTest.class, CreateHandlerTest.class, UpdateHandlerTest.class, DeleteHandlerTest.class,
     PersistenceManagerImplTest.class, GetRequestTest.class, PutRequestTest.class, PostRequestTest.class,
-    DeleteRequestTest.class, JsonSerializerTest.class, QueryCreateHandlerTest.class, ResourceInstanceImplTest.class})
+    DeleteRequestTest.class, JsonSerializerTest.class, QueryCreateHandlerTest.class, ResourceInstanceImplTest.class,
+    QueryLexerTest.class, QueryParserTest.class, IsEmptyOperatorTest.class, InOperatorTest.class,
+    AndOperatorTest.class, OrOperatorTest.class, EqualsOperatorTest.class, GreaterEqualsOperatorTest.class,
+    GreaterOperatorTest.class, LessEqualsOperatorTest.class, LessOperatorTest.class, NotEqualsOperatorTest.class,
+    NotOperatorTest.class})
 public class TestSuite {
 }

Modified: incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/CreateHandlerTest.java
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/CreateHandlerTest.java?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/CreateHandlerTest.java (original)
+++ incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/CreateHandlerTest.java Wed Mar 20 20:44:43 2013
@@ -18,6 +18,7 @@
 
 package org.apache.ambari.server.api.handlers;
 
+import org.apache.ambari.server.api.predicate.InvalidQueryException;
 import org.apache.ambari.server.api.resources.ResourceInstance;
 import org.apache.ambari.server.api.services.ResultStatus;
 import org.apache.ambari.server.api.services.persistence.PersistenceManager;
@@ -162,4 +163,21 @@ public class CreateHandlerTest {
       return m_testPm;
     }
   }
+
+  @Test
+  public void testHandleRequest__InvalidQuery() throws Exception {
+    Request request = createNiceMock(Request.class);
+    ResourceInstance resource = createNiceMock(ResourceInstance.class);
+    Exception e = new InvalidQueryException("test exception");
+
+    expect(request.getResource()).andReturn(resource);
+    expect(request.getQueryPredicate()).andThrow(e);
+    replay(request, resource);
+
+    Result result = new CreateHandler().handleRequest(request);
+    assertEquals(ResultStatus.STATUS.BAD_REQUEST, result.getStatus().getStatus());
+    assertTrue(result.getStatus().getMessage().contains(e.getMessage()));
+
+    verify(request, resource);
+  }
 }
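[Each handler test file below adds the same testHandleRequest__InvalidQuery case: Request.getQueryPredicate() can now throw InvalidQueryException, and every handler is expected to translate it into a 400 result whose message contains the exception text. A sketch of the error path those tests imply follows; the surrounding handler skeleton, the executeQuery helper, and the ResultImpl/ResultStatus constructors are assumptions, while getQueryPredicate() and the BAD_REQUEST mapping are taken from the tests.]

    // Sketch of the shared error path exercised by the new tests; everything
    // outside the catch clause is assumed rather than shown in this commit.
    public Result handleRequest(Request request) {
      try {
        Predicate predicate = request.getQueryPredicate(); // may throw InvalidQueryException
        return executeQuery(request, predicate);           // hypothetical helper
      } catch (InvalidQueryException e) {
        // The tests only require STATUS.BAD_REQUEST and that the message
        // contain e.getMessage(); the exact prefix here is a guess.
        return new ResultImpl(new ResultStatus(
            ResultStatus.STATUS.BAD_REQUEST, "Invalid Query: " + e.getMessage()));
      }
    }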

Modified: incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/DeleteHandlerTest.java
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/DeleteHandlerTest.java?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/DeleteHandlerTest.java (original)
+++ incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/DeleteHandlerTest.java Wed Mar 20 20:44:43 2013
@@ -18,6 +18,7 @@ package org.apache.ambari.server.api.han
  * limitations under the License.
  */
 
+import org.apache.ambari.server.api.predicate.InvalidQueryException;
 import org.apache.ambari.server.api.query.Query;
 import org.apache.ambari.server.api.resources.ResourceInstance;
 import org.apache.ambari.server.api.services.ResultStatus;
@@ -28,7 +29,6 @@ import org.apache.ambari.server.api.util
 import org.apache.ambari.server.controller.spi.Predicate;
 import org.apache.ambari.server.controller.spi.RequestStatus;
 import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.junit.Test;
 
 import java.util.*;
@@ -171,4 +171,21 @@ public class DeleteHandlerTest {
       return m_testPm;
     }
   }
+
+  @Test
+  public void testHandleRequest__InvalidQuery() throws Exception {
+    Request request = createNiceMock(Request.class);
+    ResourceInstance resource = createNiceMock(ResourceInstance.class);
+    Exception e = new InvalidQueryException("test exception");
+
+    expect(request.getResource()).andReturn(resource);
+    expect(request.getQueryPredicate()).andThrow(e);
+    replay(request, resource);
+
+    Result result = new DeleteHandler().handleRequest(request);
+    assertEquals(ResultStatus.STATUS.BAD_REQUEST, result.getStatus().getStatus());
+    assertTrue(result.getStatus().getMessage().contains(e.getMessage()));
+
+    verify(request, resource);
+  }
 }
\ No newline at end of file

Modified: incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/QueryCreateHandlerTest.java
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/QueryCreateHandlerTest.java?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/QueryCreateHandlerTest.java (original)
+++ incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/QueryCreateHandlerTest.java Wed Mar 20 20:44:43 2013
@@ -19,6 +19,7 @@
 
 package org.apache.ambari.server.api.handlers;
 
+import org.apache.ambari.server.api.predicate.InvalidQueryException;
 import org.apache.ambari.server.api.query.Query;
 import org.apache.ambari.server.api.resources.ResourceDefinition;
 import org.apache.ambari.server.api.resources.ResourceInstance;
@@ -224,4 +225,28 @@ public class QueryCreateHandlerTest {
     }
   }
 
+  @Test
+  public void testHandleRequest__InvalidQueryException() throws Exception {
+    Request request = createStrictMock(Request.class);
+    ResourceInstance resource = createStrictMock(ResourceInstance.class);
+    Query query = createMock(Query.class);
+    InvalidQueryException exception = new InvalidQueryException("test");
+
+    expect(request.getResource()).andReturn(resource);
+    expect(resource.getQuery()).andReturn(query);
+
+    expect(request.getFields()).andReturn(Collections.<String, TemporalInfo>emptyMap());
+
+    expect(request.getQueryPredicate()).andThrow(exception);
+    replay(request, resource, query);
+
+    //test
+    QueryCreateHandler handler = new QueryCreateHandler();
+    Result result = handler.handleRequest(request);
+
+    assertEquals(ResultStatus.STATUS.BAD_REQUEST, result.getStatus().getStatus());
+    assertTrue(result.getStatus().getMessage().contains(exception.getMessage()));
+    verify(request, resource, query);
+  }
+
 }

Modified: incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/ReadHandlerTest.java
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/ReadHandlerTest.java?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/ReadHandlerTest.java (original)
+++ incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/ReadHandlerTest.java Wed Mar 20 20:44:43 2013
@@ -18,6 +18,7 @@
 
 package org.apache.ambari.server.api.handlers;
 
+import org.apache.ambari.server.api.predicate.InvalidQueryException;
 import org.apache.ambari.server.api.query.Query;
 import org.apache.ambari.server.api.resources.ResourceInstance;
 import org.apache.ambari.server.api.services.Request;
@@ -35,6 +36,7 @@ import java.util.Map;
 import static org.easymock.EasyMock.*;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
 
 /**
  * Unit tests for ReadHandler.
@@ -246,6 +248,30 @@ public class ReadHandlerTest {
     verify(request, resource, query);
   }
 
+  @Test
+  public void testHandleRequest__InvalidQueryException() throws Exception {
+    Request request = createStrictMock(Request.class);
+    ResourceInstance resource = createStrictMock(ResourceInstance.class);
+    Query query = createMock(Query.class);
+    InvalidQueryException exception = new InvalidQueryException("test");
+
+    expect(request.getResource()).andReturn(resource);
+    expect(resource.getQuery()).andReturn(query);
+
+    expect(request.getFields()).andReturn(Collections.<String, TemporalInfo>emptyMap());
+
+    expect(request.getQueryPredicate()).andThrow(exception);
+    replay(request, resource, query);
+
+    //test
+    ReadHandler handler = new ReadHandler();
+    Result result = handler.handleRequest(request);
+
+    assertEquals(ResultStatus.STATUS.BAD_REQUEST, result.getStatus().getStatus());
+    assertTrue(result.getStatus().getMessage().contains(exception.getMessage()));
+    verify(request, resource, query);
+  }
+
   //todo: reverted to just logging the exception and re-throwing it
 //  @Test
 //  public void testHandleRequest__RuntimeException() throws Exception {

Modified: incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/UpdateHandlerTest.java
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/UpdateHandlerTest.java?rev=1459041&r1=1459040&r2=1459041&view=diff
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/UpdateHandlerTest.java (original)
+++ incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/handlers/UpdateHandlerTest.java Wed Mar 20 20:44:43 2013
@@ -18,6 +18,7 @@
 
 package org.apache.ambari.server.api.handlers;
 
+import org.apache.ambari.server.api.predicate.InvalidQueryException;
 import org.apache.ambari.server.api.query.Query;
 import org.apache.ambari.server.api.resources.ResourceInstance;
 import org.apache.ambari.server.api.services.ResultStatus;
@@ -28,7 +29,6 @@ import org.apache.ambari.server.api.util
 import org.apache.ambari.server.controller.spi.Predicate;
 import org.apache.ambari.server.controller.spi.RequestStatus;
 import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.junit.Test;
 
 import java.util.*;
@@ -174,4 +174,21 @@ public class UpdateHandlerTest {
       return m_testPm;
     }
   }
+
+  @Test
+  public void testHandleRequest__InvalidQuery() throws Exception {
+    Request request = createNiceMock(Request.class);
+    ResourceInstance resource = createNiceMock(ResourceInstance.class);
+    Exception e = new InvalidQueryException("test exception");
+
+    expect(request.getResource()).andReturn(resource);
+    expect(request.getQueryPredicate()).andThrow(e);
+    replay(request, resource);
+
+    Result result = new UpdateHandler().handleRequest(request);
+    assertEquals(ResultStatus.STATUS.BAD_REQUEST, result.getStatus().getStatus());
+    assertTrue(result.getStatus().getMessage().contains(e.getMessage()));
+
+    verify(request, resource);
+  }
 }
\ No newline at end of file

Added: incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/QueryLexerTest.java
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/QueryLexerTest.java?rev=1459041&view=auto
==============================================================================
--- incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/QueryLexerTest.java (added)
+++ incubator/ambari/branches/branch-1.2/ambari-server/src/test/java/org/apache/ambari/server/api/predicate/QueryLexerTest.java Wed Mar 20 20:44:43 2013
@@ -0,0 +1,226 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.ambari.server.api.predicate;
+
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import static org.junit.Assert.*;
+
+/**
+ * QueryLexer unit tests
+ */
+public class QueryLexerTest {
+
+  @Test
+  public void testTokens_simple() throws InvalidQueryException {
+    List<Token> listTokens = new ArrayList<Token>();
+    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "="));
+    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "a"));
+    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "1"));
+    listTokens.add(new Token(Token.TYPE.LOGICAL_OPERATOR, "&"));
+    listTokens.add(new Token(Token.TYPE.BRACKET_OPEN, "("));
+    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "<="));
+    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "b"));
+    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "2"));
+    listTokens.add(new Token(Token.TYPE.LOGICAL_OPERATOR, "|"));
+    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, ">"));
+    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "c"));
+    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "3"));
+    listTokens.add(new Token(Token.TYPE.BRACKET_CLOSE, ")"));
+
+    QueryLexer lexer = new QueryLexer();
+    Token[] tokens = lexer.tokens("a=1&(b<=2|c>3)");
+
+    assertArrayEquals(listTokens.toArray(new Token[listTokens.size()]), tokens);
+  }
+
+  @Test
+  public void testTokens_multipleBrackets() throws InvalidQueryException {
+    List<Token> listTokens = new ArrayList<Token>();
+    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "<"));
+    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "a"));
+    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "1"));
+    listTokens.add(new Token(Token.TYPE.LOGICAL_OPERATOR, "&"));
+    listTokens.add(new Token(Token.TYPE.BRACKET_OPEN, "("));
+    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "<="));
+    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "b"));
+    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "2"));
+    listTokens.add(new Token(Token.TYPE.LOGICAL_OPERATOR, "&"));
+    listTokens.add(new Token(Token.TYPE.BRACKET_OPEN, "("));
+    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, ">="));
+    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "c"));
+    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "3"));
+    listTokens.add(new Token(Token.TYPE.LOGICAL_OPERATOR, "|"));
+    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "!="));
+    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "d"));
+    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "4"));
+    listTokens.add(new Token(Token.TYPE.BRACKET_CLOSE, ")"));
+    listTokens.add(new Token(Token.TYPE.BRACKET_CLOSE, ")"));
+
+    QueryLexer lexer = new QueryLexer();
+    Token[] tokens = lexer.tokens("a<1&(b<=2&(c>=3|d!=4))");
+
+    assertArrayEquals(listTokens.toArray(new Token[listTokens.size()]), tokens);
+  }
+
+  @Test
+  public void testUnaryNot() throws Exception {
+    QueryLexer lexer = new QueryLexer();
+    Token[] tokens = lexer.tokens("!foo<5");
+
+    List<Token> listTokens = new ArrayList<Token>();
+    listTokens.add(new Token(Token.TYPE.LOGICAL_UNARY_OPERATOR, "!"));
+    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "<"));
+    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "foo"));
+    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "5"));
+    assertArrayEquals(listTokens.toArray(new Token[listTokens.size()]), tokens);
+  }
+
+  @Test
+  public void testInOperator() throws Exception {
+    QueryLexer lexer = new QueryLexer();
+    Token[] tokens = lexer.tokens("foo.in(one, two, 3)");
+
+    List<Token> listTokens = new ArrayList<Token>();
+    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR_FUNC, ".in("));
+    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "foo"));
+    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "one, two, 3"));
+    listTokens.add(new Token(Token.TYPE.BRACKET_CLOSE, ")"));
+
+    assertArrayEquals(listTokens.toArray(new Token[listTokens.size()]), tokens);
+  }
+
+  @Test
+  public void testIsEmptyOperator() throws Exception {
+    QueryLexer lexer = new QueryLexer();
+    Token[] tokens = lexer.tokens("category1.isEmpty()");
+
+    List<Token> listTokens = new ArrayList<Token>();
+    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR_FUNC, ".isEmpty("));
+    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "category1"));
+    listTokens.add(new Token(Token.TYPE.BRACKET_CLOSE, ")"));
+
+    assertArrayEquals(listTokens.toArray(new Token[listTokens.size()]), tokens);
+  }
+
+  @Test
+  public void testTokens_ignoreFieldsSyntax___noPredicate() throws InvalidQueryException {
+
+    QueryLexer lexer = new QueryLexer();
+    Token[] tokens = lexer.tokens("fields=foo,bar");
+    assertEquals(0, tokens.length);
+  }
+
+  @Test
+  public void testTokens_ignoreFieldsSyntax___fieldsFirst() throws InvalidQueryException {
+
+    List<Token> listTokens = new ArrayList<Token>();
+    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "="));
+    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "foo"));
+    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "1"));
+
+    QueryLexer lexer = new QueryLexer();
+    Token[] tokens = lexer.tokens("fields=foo,bar&foo=1");
+
+    assertArrayEquals(listTokens.toArray(new Token[listTokens.size()]), tokens);
+  }
+
+  @Test
+  public void testTokens_ignoreFieldsSyntax___fieldsLast() throws InvalidQueryException {
+
+    List<Token> listTokens = new ArrayList<Token>();
+    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "="));
+    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "foo"));
+    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "1"));
+
+    QueryLexer lexer = new QueryLexer();
+    Token[] tokens = lexer.tokens("foo=1&fields=foo,bar");
+
+    assertArrayEquals(listTokens.toArray(new Token[listTokens.size()]), tokens);
+  }
+
+  @Test
+  public void testTokens_ignoreUnderscoreSyntax___noPredicate() throws InvalidQueryException {
+
+    QueryLexer lexer = new QueryLexer();
+    Token[] tokens = lexer.tokens("_=1");
+    assertEquals(0, tokens.length);
+  }
+
+  @Test
+  public void testTokens_ignoreUnderscoreSyntax___fieldsFirst() throws InvalidQueryException {
+
+    List<Token> listTokens = new ArrayList<Token>();
+    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "="));
+    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "foo"));
+    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "1"));
+
+    QueryLexer lexer = new QueryLexer();
+    Token[] tokens = lexer.tokens("_=111111&foo=1");
+
+    assertArrayEquals(listTokens.toArray(new Token[listTokens.size()]), tokens);
+  }
+
+  @Test
+  public void testTokens_ignoreUnderscoreSyntax___fieldsLast() throws InvalidQueryException {
+
+    List<Token> listTokens = new ArrayList<Token>();
+    listTokens.add(new Token(Token.TYPE.RELATIONAL_OPERATOR, "="));
+    listTokens.add(new Token(Token.TYPE.PROPERTY_OPERAND, "foo"));
+    listTokens.add(new Token(Token.TYPE.VALUE_OPERAND, "1"));
+
+    QueryLexer lexer = new QueryLexer();
+    Token[] tokens = lexer.tokens("foo=1&_=11111");
+
+    assertArrayEquals(listTokens.toArray(new Token[listTokens.size()]), tokens);
+  }
+
+  @Test
+  public void testTokens_invalidRelationalOp() {
+    try {
+      new QueryLexer().tokens("foo=1&bar|5");
+      fail("Expected InvalidQueryException due to invalid relational op");
+    } catch (InvalidQueryException e) {
+      //expected
+    }
+  }
+
+  @Test
+  public void testTokens_invalidLogicalOp() {
+    try {
+      new QueryLexer().tokens("foo=1<5=2");
+      fail("Expected InvalidQueryException due to invalid logical op");
+    } catch (InvalidQueryException e) {
+      //expected
+    }
+  }
+
+  @Test
+  public void testTokens_invalidLogicalOp2() {
+    try {
+      new QueryLexer().tokens("foo=1&&5=2");
+      fail("Expected InvalidQueryException due to invalid logical op");
+    } catch (InvalidQueryException e) {
+      //expected
+    }
+  }
+}
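
[Taken together with the handler changes above, the lexer's job is to turn the query portion of a request URL into a token stream that a parser reduces to a Predicate, which is what Request.getQueryPredicate() hands to the handlers. A minimal usage sketch; QueryParser and its parse(Token[]) signature are assumptions based on the QueryParserTest import added to TestSuite, as only QueryLexer.tokens(String) appears in this commit.]

    import org.apache.ambari.server.api.predicate.*;
    import org.apache.ambari.server.controller.spi.Predicate;

    public class QueryPipelineSketch {
      public static void main(String[] args) throws InvalidQueryException {
        // Same query string as testTokens_simple above.
        Token[] tokens = new QueryLexer().tokens("a=1&(b<=2|c>3)");
        // QueryParser and parse(...) are assumed from QueryParserTest in the suite.
        Predicate predicate = new QueryParser().parse(tokens);
        System.out.println(predicate);
      }
    }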