Posted to commits@falcon.apache.org by sr...@apache.org on 2014/02/06 08:38:02 UTC

svn commit: r1565098 [12/15] - in /incubator/falcon: site/ site/0.3-incubating/ site/0.3-incubating/docs/ site/0.3-incubating/docs/restapi/ site/0.4-incubating/ site/0.4-incubating/css/ site/0.4-incubating/docs/ site/0.4-incubating/docs/restapi/ site/0...

Modified: incubator/falcon/site/project-info.html
URL: http://svn.apache.org/viewvc/incubator/falcon/site/project-info.html?rev=1565098&r1=1565097&r2=1565098&view=diff
==============================================================================
--- incubator/falcon/site/project-info.html (original)
+++ incubator/falcon/site/project-info.html Thu Feb  6 07:37:58 2014
@@ -1,13 +1,13 @@
 <!DOCTYPE html>
 <!--
- | Generated by Apache Maven Doxia at Oct 28, 2013
+ | Generated by Apache Maven Doxia at Feb 6, 2014
  | Rendered using Apache Maven Fluido Skin 1.3.0
 -->
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20131028" />
+    <meta name="Date-Revision-yyyymmdd" content="20140206" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Falcon - Project Information</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.3.0.min.css" />
@@ -99,6 +99,9 @@
                       <li>      <a href="http://www.apache.org/dist/incubator/falcon/0.3-incubating"  title="0.3-incubating">0.3-incubating</a>
 </li>
                   
+                      <li>      <a href="http://www.apache.org/dist/incubator/falcon/0.4-incubating"  title="0.4-incubating">0.4-incubating</a>
+</li>
+                  
                       <li>      <a href="https://cwiki.apache.org/confluence/display/FALCON/Roadmap"  title="Roadmap">Roadmap</a>
 </li>
                           </ul>
@@ -112,6 +115,9 @@
                   
                       <li>      <a href="0.3-incubating/index.html"  title="0.3-incubating">0.3-incubating</a>
 </li>
+                  
+                      <li>      <a href="0.4-incubating/index.html"  title="0.4-incubating">0.4-incubating</a>
+</li>
                           </ul>
       </li>
                 <li class="dropdown">
@@ -144,6 +150,9 @@
                   
                       <li>      <a href="docs/restapi/ResourceList.html"  title="Rest API">Rest API</a>
 </li>
+                  
+                      <li>      <a href="docs/HiveIntegration.html"  title="Hive Integration">Hive Integration</a>
+</li>
                           </ul>
       </li>
                 <li class="dropdown">
@@ -227,7 +236,7 @@
         
                 
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2013-10-28</li> 
+                  <li id="publishDate" class="pull-right">Last Published: 2014-02-06</li> 
             
                             </ul>
       </div>
@@ -244,7 +253,7 @@
 
     <footer>
             <div class="container">
-              <div class="row span12">Copyright &copy;                    2013
+              <div class="row span12">Copyright &copy;                    2013-2014
                         <a href="http://www.apache.org">Apache Software Foundation</a>.
             All Rights Reserved.      
                     

Modified: incubator/falcon/site/source-repository.html
URL: http://svn.apache.org/viewvc/incubator/falcon/site/source-repository.html?rev=1565098&r1=1565097&r2=1565098&view=diff
==============================================================================
--- incubator/falcon/site/source-repository.html (original)
+++ incubator/falcon/site/source-repository.html Thu Feb  6 07:37:58 2014
@@ -1,13 +1,13 @@
 <!DOCTYPE html>
 <!--
- | Generated by Apache Maven Doxia at Oct 28, 2013
+ | Generated by Apache Maven Doxia at Feb 6, 2014
  | Rendered using Apache Maven Fluido Skin 1.3.0
 -->
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20131028" />
+    <meta name="Date-Revision-yyyymmdd" content="20140206" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Falcon - Source Repository</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.3.0.min.css" />
@@ -99,6 +99,9 @@
                       <li>      <a href="http://www.apache.org/dist/incubator/falcon/0.3-incubating"  title="0.3-incubating">0.3-incubating</a>
 </li>
                   
+                      <li>      <a href="http://www.apache.org/dist/incubator/falcon/0.4-incubating"  title="0.4-incubating">0.4-incubating</a>
+</li>
+                  
                       <li>      <a href="https://cwiki.apache.org/confluence/display/FALCON/Roadmap"  title="Roadmap">Roadmap</a>
 </li>
                           </ul>
@@ -112,6 +115,9 @@
                   
                       <li>      <a href="0.3-incubating/index.html"  title="0.3-incubating">0.3-incubating</a>
 </li>
+                  
+                      <li>      <a href="0.4-incubating/index.html"  title="0.4-incubating">0.4-incubating</a>
+</li>
                           </ul>
       </li>
                 <li class="dropdown">
@@ -144,6 +150,9 @@
                   
                       <li>      <a href="docs/restapi/ResourceList.html"  title="Rest API">Rest API</a>
 </li>
+                  
+                      <li>      <a href="docs/HiveIntegration.html"  title="Hive Integration">Hive Integration</a>
+</li>
                           </ul>
       </li>
                 <li class="dropdown">
@@ -227,7 +236,7 @@
         
                 
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2013-10-28</li> 
+                  <li id="publishDate" class="pull-right">Last Published: 2014-02-06</li> 
             
                             </ul>
       </div>
@@ -244,7 +253,7 @@
 
     <footer>
             <div class="container">
-              <div class="row span12">Copyright &copy;                    2013
+              <div class="row span12">Copyright &copy;                    2013-2014
                         <a href="http://www.apache.org">Apache Software Foundation</a>.
             All Rights Reserved.      
                     

Modified: incubator/falcon/site/team-list.html
URL: http://svn.apache.org/viewvc/incubator/falcon/site/team-list.html?rev=1565098&r1=1565097&r2=1565098&view=diff
==============================================================================
--- incubator/falcon/site/team-list.html (original)
+++ incubator/falcon/site/team-list.html Thu Feb  6 07:37:58 2014
@@ -1,13 +1,13 @@
 <!DOCTYPE html>
 <!--
- | Generated by Apache Maven Doxia at Oct 28, 2013
+ | Generated by Apache Maven Doxia at Feb 6, 2014
  | Rendered using Apache Maven Fluido Skin 1.3.0
 -->
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20131028" />
+    <meta name="Date-Revision-yyyymmdd" content="20140206" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Falcon - Team list</title>
     <link rel="stylesheet" href="./css/apache-maven-fluido-1.3.0.min.css" />
@@ -99,6 +99,9 @@
                       <li>      <a href="http://www.apache.org/dist/incubator/falcon/0.3-incubating"  title="0.3-incubating">0.3-incubating</a>
 </li>
                   
+                      <li>      <a href="http://www.apache.org/dist/incubator/falcon/0.4-incubating"  title="0.4-incubating">0.4-incubating</a>
+</li>
+                  
                       <li>      <a href="https://cwiki.apache.org/confluence/display/FALCON/Roadmap"  title="Roadmap">Roadmap</a>
 </li>
                           </ul>
@@ -112,6 +115,9 @@
                   
                       <li>      <a href="0.3-incubating/index.html"  title="0.3-incubating">0.3-incubating</a>
 </li>
+                  
+                      <li>      <a href="0.4-incubating/index.html"  title="0.4-incubating">0.4-incubating</a>
+</li>
                           </ul>
       </li>
                 <li class="dropdown">
@@ -144,6 +150,9 @@
                   
                       <li>      <a href="docs/restapi/ResourceList.html"  title="Rest API">Rest API</a>
 </li>
+                  
+                      <li>      <a href="docs/HiveIntegration.html"  title="Hive Integration">Hive Integration</a>
+</li>
                           </ul>
       </li>
                 <li class="dropdown">
@@ -227,7 +236,7 @@
         
                 
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2013-10-28</li> 
+                  <li id="publishDate" class="pull-right">Last Published: 2014-02-06</li> 
             
                             </ul>
       </div>
@@ -268,7 +277,7 @@ window.onLoad = init();
 
     <footer>
             <div class="container">
-              <div class="row span12">Copyright &copy;                    2013
+              <div class="row span12">Copyright &copy;                    2013-2014
                         <a href="http://www.apache.org">Apache Software Foundation</a>.
             All Rights Reserved.      
                     

Modified: incubator/falcon/site/wiki/HowToContribute.html
URL: http://svn.apache.org/viewvc/incubator/falcon/site/wiki/HowToContribute.html?rev=1565098&r1=1565097&r2=1565098&view=diff
==============================================================================
--- incubator/falcon/site/wiki/HowToContribute.html (original)
+++ incubator/falcon/site/wiki/HowToContribute.html Thu Feb  6 07:37:58 2014
@@ -1,13 +1,13 @@
 <!DOCTYPE html>
 <!--
- | Generated by Apache Maven Doxia at Oct 28, 2013
+ | Generated by Apache Maven Doxia at Feb 6, 2014
  | Rendered using Apache Maven Fluido Skin 1.3.0
 -->
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20131028" />
+    <meta name="Date-Revision-yyyymmdd" content="20140206" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Falcon - How To Contribute</title>
     <link rel="stylesheet" href="../css/apache-maven-fluido-1.3.0.min.css" />
@@ -99,6 +99,9 @@
                       <li>      <a href="http://www.apache.org/dist/incubator/falcon/0.3-incubating"  title="0.3-incubating">0.3-incubating</a>
 </li>
                   
+                      <li>      <a href="http://www.apache.org/dist/incubator/falcon/0.4-incubating"  title="0.4-incubating">0.4-incubating</a>
+</li>
+                  
                       <li>      <a href="https://cwiki.apache.org/confluence/display/FALCON/Roadmap"  title="Roadmap">Roadmap</a>
 </li>
                           </ul>
@@ -112,6 +115,9 @@
                   
                       <li>      <a href="../0.3-incubating/index.html"  title="0.3-incubating">0.3-incubating</a>
 </li>
+                  
+                      <li>      <a href="../0.4-incubating/index.html"  title="0.4-incubating">0.4-incubating</a>
+</li>
                           </ul>
       </li>
                 <li class="dropdown">
@@ -144,6 +150,9 @@
                   
                       <li>      <a href="../docs/restapi/ResourceList.html"  title="Rest API">Rest API</a>
 </li>
+                  
+                      <li>      <a href="../docs/HiveIntegration.html"  title="Hive Integration">Hive Integration</a>
+</li>
                           </ul>
       </li>
                 <li class="dropdown">
@@ -227,7 +236,7 @@
         
                 
                     
-                  <li id="publishDate" class="pull-right">Last Published: 2013-10-28</li> 
+                  <li id="publishDate" class="pull-right">Last Published: 2014-02-06</li> 
             
                             </ul>
       </div>
@@ -244,7 +253,7 @@
 
     <footer>
             <div class="container">
-              <div class="row span12">Copyright &copy;                    2013
+              <div class="row span12">Copyright &copy;                    2013-2014
                         <a href="http://www.apache.org">Apache Software Foundation</a>.
             All Rights Reserved.      
                     

Modified: incubator/falcon/trunk/general/pom.xml
URL: http://svn.apache.org/viewvc/incubator/falcon/trunk/general/pom.xml?rev=1565098&r1=1565097&r2=1565098&view=diff
==============================================================================
--- incubator/falcon/trunk/general/pom.xml (original)
+++ incubator/falcon/trunk/general/pom.xml Thu Feb  6 07:37:58 2014
@@ -22,10 +22,10 @@
     <parent>
         <groupId>org.apache.falcon</groupId>
         <artifactId>falcon-website</artifactId>
-        <version>0.4-SNAPSHOT</version>
+        <version>0.5-SNAPSHOT</version>
     </parent>
     <artifactId>falcon-website-general</artifactId>
-    <version>0.4-SNAPSHOT</version>
+    <version>0.5-SNAPSHOT</version>
     <packaging>war</packaging>
 
     <name>Apache Falcon - General</name>

Modified: incubator/falcon/trunk/general/src/site/site.xml
URL: http://svn.apache.org/viewvc/incubator/falcon/trunk/general/src/site/site.xml?rev=1565098&r1=1565097&r2=1565098&view=diff
==============================================================================
--- incubator/falcon/trunk/general/src/site/site.xml (original)
+++ incubator/falcon/trunk/general/src/site/site.xml Thu Feb  6 07:37:58 2014
@@ -95,12 +95,14 @@
 
         <menu name="Releases">
             <item name="0.3-incubating" href="http://www.apache.org/dist/incubator/falcon/0.3-incubating"/>
+            <item name="0.4-incubating" href="http://www.apache.org/dist/incubator/falcon/0.4-incubating"/>
             <item name="Roadmap" href="https://cwiki.apache.org/confluence/display/FALCON/Roadmap"/>
         </menu>
 
         <menu name="Documentation">
             <item name="current" href="./docs/GettingStarted.html"/>
             <item name="0.3-incubating" href="./0.3-incubating/index.html"/>
+            <item name="0.4-incubating" href="./0.4-incubating/index.html"/>
         </menu>
 
         <menu name="Resources">
@@ -113,6 +115,7 @@
             <item name="Entity Specification" href="./docs/EntitySpecification.html"/>
             <item name="Client (Falcon CLI)" href="./docs/FalconCLI.html"/>
             <item name="Rest API" href="./docs/restapi/ResourceList.html"/>
+            <item name="Hive Integration" href="./docs/HiveIntegration.html"/>
         </menu>
 
         <menu name="ASF">

Modified: incubator/falcon/trunk/general/src/site/twiki/docs/EntitySpecification.twiki
URL: http://svn.apache.org/viewvc/incubator/falcon/trunk/general/src/site/twiki/docs/EntitySpecification.twiki?rev=1565098&r1=1565097&r2=1565098&view=diff
==============================================================================
--- incubator/falcon/trunk/general/src/site/twiki/docs/EntitySpecification.twiki (original)
+++ incubator/falcon/trunk/general/src/site/twiki/docs/EntitySpecification.twiki Thu Feb  6 07:37:58 2014
@@ -44,6 +44,14 @@ A workflow interface specifies the inter
 Falcon uses this interface to schedule the processes referencing this cluster on the workflow engine defined here.
 
 <verbatim>
+<interface type="registry" endpoint="thrift://localhost:9083" version="0.11.0" />
+</verbatim>
+A registry interface specifies the interface for the metadata catalog, such as the Hive Metastore (or HCatalog).
+Falcon uses this interface to register/de-register partitions for a given database and table. It also
+uses this information to schedule data-availability events based on partitions in the workflow engine.
+Although the Hive metastore supports both RPC and HTTP, Falcon ships with an implementation for RPC over Thrift.
+
+<verbatim>
 <interface type="messaging" endpoint="tcp://localhost:61616?daemon=true" version="5.4.6" />
 </verbatim>
 A messaging interface specifies the interface for sending feed availability messages; its endpoint is the broker URL with a tcp address.
@@ -76,41 +84,20 @@ xmlns:xsi="http://www.w3.org/2001/XMLSch
 </verbatim>
 A feed should have a unique name and this name is referenced by processes as input or output feed.
 
-<verbatim>
-   <partitions>
-        <partition name="country" />
-        <partition name="cluster" />
-    </partitions>
-</verbatim>
-A feed can define multiple partitions, if a referenced cluster defines partitions then the number of partitions in feed has to be equal to or more than the cluster partitions.
-
-<verbatim>
-    <groups>online,bi</groups>
-</verbatim>
-A feed specifies a list of comma separated groups, a group is a logical grouping of feeds and a group is said to be
-available if all the feeds belonging to a group are available. The frequency of all the feed which belong to the same group
-must be same.
+---+++ Storage
+Falcon introduces a new abstraction to encapsulate the storage for a given feed, which can be expressed
+either as a path on the file system (File System Storage) or as a table in a catalog such as Hive (Catalog Storage).
 
 <verbatim>
-    <availabilityFlag>_SUCCESS</availabilityFlag>
+    <xs:choice minOccurs="1" maxOccurs="1">
+        <xs:element type="locations" name="locations"/>
+        <xs:element type="catalog-table" name="table"/>
+    </xs:choice>
 </verbatim>
-An availabilityFlag specifies the name of a file which when present/created in a feeds data directory, 
-the feed is termed as available. ex: _SUCCESS, if this element is ignored then Falcon would consider the presence of feed's
-data directory as feed availability.
 
-<verbatim>
-    <frequency>minutes(20)</frequency>
-</verbatim>
-A feed has a frequency which specifies the frequency by which this feed is generated. 
-ex: it can be generated every hour, every 5 minutes, daily, weekly etc.
-valid frequency type for a feed are minutes, hours, days, months. The values can be negative, zero or positive.
+A feed should contain exactly one of the two storage options: locations on the file system, or a table in a catalog.
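+
+A minimal sketch of the two alternatives (paths and table names are illustrative; both forms are
+detailed in the sections below):
+<verbatim>
+    <!-- Option 1: File System Storage -->
+    <locations>
+        <location type="data" path="/projects/falcon/clicks" />
+    </locations>
+
+    <!-- Option 2: Catalog Storage -->
+    <table uri="catalog:default:clicks#ds=${YEAR}-${MONTH}-${DAY}" />
+</verbatim>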
 
-<verbatim>
-    <late-arrival cut-off="hours(6)" />
-</verbatim>
-A late-arrival specifies the cut-off period till which the feed is expected to arrive late and should be honored be processes referring to it as input feed by rerunning the instances in case the data arrives late with in a cut-off period.
-The cut-off period is specified by expression frequency(times), ex: if the feed can arrive late
-upto 8 hours then late-arrival's cut-off="hours(8)"
+---++++ File System Storage
 
 <verbatim>
         <clusters>
@@ -131,7 +118,7 @@ Validity of a feed on cluster specifies 
 Retention specifies how long the feed is retained on this cluster and the action to be taken on the feed after the expiry of retention period.
 The retention limit is specified by expression frequency(times), ex: if feed should be retained for at least 6 hours then retention's limit="hours(6)".
 The field partitionExp contains partition tags. The number of partition tags has to be equal to the number of partitions specified in the feed schema. A partition tag can be a wildcard (*), a static string or an expression. At least one of the strings has to be an expression.
-Location specifies where the feed is available on this cluster. This is an optional parameter and path can be same or different from the global locations tag value ( it is mentioned outside the clusters tag ) . This tag provides the user to flexibility to have feed at different locations on different clusters. If this attribute is missing then the default global location is picked from the feed definition. Also the individual location tags data, stats, meta are optional. 
+Location specifies where the feed is available on this cluster. This is an optional parameter and the path can be the same as or different from the global locations tag value (mentioned outside the clusters tag). This tag gives the user the flexibility to have the feed at different locations on different clusters. If this attribute is missing, the default global location is picked from the feed definition. Also, the individual location tags data, stats, meta are optional.
 
 <verbatim>
  <location type="data" path="/projects/falcon/clicks" />
@@ -145,15 +132,107 @@ The granularity of date pattern in the p
 Other location types which are supported are stats and meta paths. If a process references a feed, the meta and stats
 paths are available as a property in the process.
 
+---++++ Catalog Storage (Table)
+
+A table tag specifies the table URI in the catalog registry as:
+<verbatim>
+catalog:$database-name:$table-name#(partition-key=partition-value);*
+</verbatim>
+
+This is modeled as a URI (similar to an ISBN URI). It does not have any reference to Hive or HCatalog. It's quite
+generic, so it can be tied to other implementations of a catalog registry. The catalog implementation specified
+in the startup config provides the implementation for the catalog URI.
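+
+For reference, the default Hive-backed implementation is wired in through the startup configuration
+(see the Hive Integration documentation):
+<verbatim>
+catalog.service.impl=org.apache.falcon.catalog.HiveCatalogService
+</verbatim>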
+
+The top-level partition has to be a dated pattern and the granularity of the date pattern should be at least
+that of the feed's frequency.
+
+<verbatim>
+    <xs:complexType name="catalog-table">
+        <xs:annotation>
+            <xs:documentation>
+                catalog specifies the uri of a Hive table along with the partition spec.
+                uri="catalog:$database:$table#(partition-key=partition-value);+"
+                Example: catalog:logs-db:clicks#ds=${YEAR}-${MONTH}-${DAY}
+            </xs:documentation>
+        </xs:annotation>
+        <xs:attribute type="xs:string" name="uri" use="required"/>
+    </xs:complexType>
+</verbatim>
+
+Examples:
+<verbatim>
+<table uri="catalog:default:clicks#ds=${YEAR}-${MONTH}-${DAY}-${HOUR};region=${region}" />
+<table uri="catalog:src_demo_db:customer_raw#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
+<table uri="catalog:tgt_demo_db:customer_bcp#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
+</verbatim>
+
+---+++ Partitions
+
+<verbatim>
+   <partitions>
+        <partition name="country" />
+        <partition name="cluster" />
+    </partitions>
+</verbatim>
+A feed can define multiple partitions. If a referenced cluster defines partitions, then the number of partitions in the feed has to be equal to or more than the cluster partitions.
+
+*Note:* This only applies to FileSystem storage, not Table storage, as partitions are defined and maintained in
+the Hive (HCatalog) registry.
+
+---+++ Groups
+
+<verbatim>
+    <groups>online,bi</groups>
+</verbatim>
+A feed specifies a list of comma-separated groups. A group is a logical grouping of feeds and is said to be
+available if all the feeds belonging to it are available. The frequency of all the feeds that belong to the same group
+must be the same.
+
+---+++ Availability Flags
+
+<verbatim>
+    <availabilityFlag>_SUCCESS</availabilityFlag>
+</verbatim>
+An availabilityFlag specifies the name of a file which, when present/created in a feed's data directory,
+marks the feed as available, e.g. _SUCCESS. If this element is omitted, Falcon considers the presence of the feed's
+data directory as feed availability.
+
+---+++ Frequency
+
+<verbatim>
+    <frequency>minutes(20)</frequency>
+</verbatim>
+A feed has a frequency which specifies how often the feed is generated,
+e.g. it can be generated every hour, every 5 minutes, daily, weekly, etc.
+Valid frequency types for a feed are minutes, hours, days and months. The values can be negative, zero or positive.
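+
+For example (values are illustrative):
+<verbatim>
+    <frequency>hours(1)</frequency>
+    <frequency>days(7)</frequency>
+</verbatim>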
+
+---+++ Late Arrival
+
+<verbatim>
+    <late-arrival cut-off="hours(6)" />
+</verbatim>
+A late-arrival specifies the cut-off period up to which the feed is expected to arrive late, and should be honored by processes referring to it as an input feed by rerunning the instances in case the data arrives late within the cut-off period.
+The cut-off period is specified by the expression frequency(times), e.g. if the feed can arrive late
+up to 8 hours then late-arrival's cut-off="hours(8)".
+
+*Note:* This currently only applies to FileSystem storage, not Table storage.
+
+---++++ Custom Properties
+
 <verbatim>
     <properties>
         <property name="tmpFeedPath" value="tmpFeedPathValue" />
         <property name="field2" value="value2" />
         <property name="queueName" value="hadoopQueue"/>
         <property name="jobPriority" value="VERY_HIGH"/>
+        <property name="timeout" value="hours(1)"/>
+        <property name="parallel" value="3"/>
     </properties>
 </verbatim>
-A key-value pair, which are propagated to the workflow engine. "queueName" and "jobPriority" are special properties available to user to specify the hadoop job queue and priority, the same value is used by Falcons launcher job.
+These are key-value pairs that are propagated to the workflow engine. "queueName" and "jobPriority" are special
+properties available to the user to specify the hadoop job queue and priority; the same values are used by Falcon's
+launcher job. "timeout" and "parallel" are other special properties: timeout decides the replication instance's
+timeout value while waiting for the feed instance, and parallel decides the number of concurrent replication
+instances that can run at any given time.
  
 ---++ Process Specification
 A process defines configuration for a workflow. A workflow is a directed acyclic graph(DAG) which defines the job for the workflow engine. A process definition defines  the configurations required to run the workflow job. For example, process defines the frequency at which the workflow should run, the clusters on which the workflow should run, the inputs and outputs for the workflow, how the workflow failures should be handled, how the late inputs should be handled and so on.  
@@ -304,11 +383,70 @@ Example:
 ...
 </process>
 </verbatim>
-The input for the workflow is a hourly feed and takes 0th and 1st hour data of today(the day when the workflow runs). If the workflow is running for 2012-03-01T06:40Z, the inputs are /projects/bootcamp/feed1/2012-03-01-00/*/US and /projects/bootcamp/feed1/2012-03-01-01/*/US. The property for this input is
+The input for the workflow is an hourly feed and takes the 0th and 1st hour data of today (the day the workflow runs).
+If the workflow is running for 2012-03-01T06:40Z, the inputs are /projects/bootcamp/feed1/2012-03-01-00/*/US and
+/projects/bootcamp/feed1/2012-03-01-01/*/US. The property for this input is
 input1=/projects/bootcamp/feed1/2012-03-01-00/*/US,/projects/bootcamp/feed1/2012-03-01-01/*/US
 
+Also, feeds with Hive table storage can be used as inputs to a process. Several parameters from inputs are passed as
+params to the user workflow or pig script.
+
+<verbatim>
+    ${wf:conf('falcon_input_database')} - database name associated with the feed for a given input
+    ${wf:conf('falcon_input_table')} - table name associated with the feed for a given input
+    ${wf:conf('falcon_input_catalog_url')} - Hive metastore URI for this input feed
+    ${wf:conf('falcon_input_partition_filter_pig')} - value of ${coord:dataInPartitionFilter('$input', 'pig')}
+    ${wf:conf('falcon_input_partition_filter_hive')} - value of ${coord:dataInPartitionFilter('$input', 'hive')}
+    ${wf:conf('falcon_input_partition_filter_java')} - value of ${coord:dataInPartitionFilter('$input', 'java')}
+</verbatim>
+
+*NOTE:* input is the name of the input configured in the process, which is input.getName().
+<verbatim><input name="input" feed="clicks-raw-table" start="yesterday(0,0)" end="yesterday(20,0)"/></verbatim>
+
+Example workflow configuration:
+
+<verbatim>
+<configuration>
+  <property>
+    <name>falcon_input_database</name>
+    <value>falcon_db</value>
+  </property>
+  <property>
+    <name>falcon_input_table</name>
+    <value>input_table</value>
+  </property>
+  <property>
+    <name>falcon_input_catalog_url</name>
+    <value>thrift://localhost:29083</value>
+  </property>
+  <property>
+    <name>falcon_input_storage_type</name>
+    <value>TABLE</value>
+  </property>
+  <property>
+    <name>feedInstancePaths</name>
+    <value>hcat://localhost:29083/falcon_db/output_table/ds=2012-04-21-00</value>
+  </property>
+  <property>
+    <name>falcon_input_partition_filter_java</name>
+    <value>(ds='2012-04-21-00')</value>
+  </property>
+  <property>
+    <name>falcon_input_partition_filter_hive</name>
+    <value>(ds='2012-04-21-00')</value>
+  </property>
+  <property>
+    <name>falcon_input_partition_filter_pig</name>
+    <value>(ds=='2012-04-21-00')</value>
+  </property>
+  ...
+</configuration>
+</verbatim>
+
+
 ---++++ Optional Inputs
-User can metion one or more inputs as optional inputs. In such cases the job does not wait on those inputs which are mentioned as optional. If they are present it considers them otherwise continue with the comlpulsury ones. 
+Users can mention one or more inputs as optional inputs. In such cases the job does not wait on those inputs which are
+mentioned as optional. If they are present it considers them; otherwise it continues with the compulsory ones.
 Example:
 <verbatim>
 <feed name="feed1">
@@ -333,6 +471,8 @@ Example:
 </process>
 </verbatim>
 
+*Note:* This is currently only supported for FileSystem storage, not Table storage.
+
 
 ---++++ Outputs
 Outputs define the output data that is generated by the workflow. A process can define 0 or more outputs. Each output is mapped to a feed and the output path is picked up from feed definition. The output instance that should be generated is specified in terms of [[FalconDocumentation][EL expression]].
@@ -370,11 +510,57 @@ Example:
 ...
 </process>
 </verbatim>
-The output of the workflow is feed instance for today. If the workflow is running for 2012-03-01T06:40Z, the workflow generates output /projects/bootcamp/feed2/2012-03-01. The property for this output that is available for workflow is:
-output1=/projects/bootcamp/feed2/2012-03-01
+The output of the workflow is the feed instance for today. If the workflow is running for 2012-03-01T06:40Z,
+the workflow generates the output /projects/bootcamp/feed2/2012-03-01. The property for this output that is available
+to the workflow is: output1=/projects/bootcamp/feed2/2012-03-01
+
+Also, feeds with Hive table storage can be used as outputs to a process. Several parameters from outputs are passed as
+params to the user workflow or pig script.
+<verbatim>
+    ${wf:conf('falcon_output_database')} - database name associated with the feed for a given output
+    ${wf:conf('falcon_output_table')} - table name associated with the feed for a given output
+    ${wf:conf('falcon_output_catalog_url')} - Hive metastore URI for the given output feed
+    ${wf:conf('falcon_output_dataout_partitions')} - value of ${coord:dataOutPartitions('$output')}
+</verbatim>
+
+*NOTE:* output is the name of the output configured in the process, which is output.getName().
+<verbatim><output name="output" feed="clicks-summary-table" instance="today(0,0)"/></verbatim>
+
+Example workflow configuration:
+
+<verbatim>
+<configuration>
+  <property>
+    <name>falcon_output_database</name>
+    <value>falcon_db</value>
+  </property>
+  <property>
+    <name>falcon_output_table</name>
+    <value>output_table</value>
+  </property>
+  <property>
+    <name>falcon_output_catalog_url</name>
+    <value>thrift://localhost:29083</value>
+  </property>
+  <property>
+    <name>falcon_output_storage_type</name>
+    <value>TABLE</value>
+  </property>
+  <property>
+    <name>feedInstancePaths</name>
+    <value>hcat://localhost:29083/falcon_db/output_table/ds=2012-04-21-00</value>
+  </property>
+  <property>
+    <name>falcon_output_dataout_partitions</name>
+    <value>'ds=2012-04-21-00'</value>
+  </property>
+  ....
+</configuration>
+</verbatim>
 
 ---++++ Properties
-The properties are key value pairs that are passed to the workflow. These properties are optional and can be used in workflow to parameterize the workflow.
+The properties are key-value pairs that are passed to the workflow. These properties are optional and can be used
+in the workflow to parameterize it.
 Syntax:
 <verbatim>
 <process name="[process name]">
@@ -392,12 +578,25 @@ queueName and jobPriority are special pr
         <property name="queueName" value="hadoopQueue"/>
         <property name="jobPriority" value="VERY_HIGH"/>
 </verbatim>
+
 ---++++ Workflow
-The workflow defines the workflow engine that should be used and the path to the workflow on hdfs. The workflow definition on hdfs contains the actual job that should run and it should confirm to the workflow specification of the engine specified. The libraries required by the workflow should be in lib folder inside the workflow path.
 
-The properties defined in the cluster and cluster properties(nameNode and jobTracker) will also be available for the workflow.
+The workflow defines the workflow engine that should be used and the path to the workflow on hdfs.
+The workflow definition on hdfs contains the actual job that should run and it should conform to
+the workflow specification of the engine specified. The libraries required by the workflow should
+be in the lib folder inside the workflow path.
+
+The properties defined in the cluster and the cluster properties (nameNode and jobTracker) will also
+be available to the workflow.
+
+Three engines are supported today: Oozie, Pig and Hive.
+
+---+++++ Oozie
+
+As part of the Oozie workflow engine support, users can embed an Oozie workflow.
+Refer to oozie [[http://incubator.apache.org/oozie/overview.html][workflow overview]] and
+[[http://incubator.apache.org/oozie/docs/3.1.3/docs/WorkflowFunctionalSpec.html][workflow specification]] for details.
 
-As of now, only oozie workflow engine is supported. Refer to oozie [[http://incubator.apache.org/oozie/overview.html][workflow overview]] and [[http://incubator.apache.org/oozie/docs/3.1.3/docs/WorkflowFunctionalSpec.html][workflow specification]] for details.  
 Syntax:
 <verbatim>
 <process name="[process name]">
@@ -415,7 +614,48 @@ Example:
 ...
 </process>
 </verbatim>
-This defines the workflow engine to be oozie and the workflow xml is defined at /projects/bootcamp/workflow/workflow.xml. The libraries are at /projects/bootcamp/workflow/lib.
+
+This defines the workflow engine to be oozie and the workflow xml is defined at
+/projects/bootcamp/workflow/workflow.xml. The libraries are at /projects/bootcamp/workflow/lib.
+
+---+++++ Pig
+
+Falcon also adds the Pig engine which enables users to embed a Pig script as a process.
+
+Example:
+<verbatim>
+<process name="sample-process">
+...
+    <workflow engine="pig" path="/projects/bootcamp/pig.script"/>
+...
+</process>
+</verbatim>
+
+This defines the workflow engine to be pig and the pig script is defined at
+/projects/bootcamp/pig.script.
+
+Feeds with Hive table storage will send one more parameter apart from the general ones:
+<verbatim>$input_filter</verbatim>
+
+---+++++ Hive
+
+Falcon also adds the Hive engine as part of Hive Integration, which enables users to embed a Hive script as a process.
+This lets users create materialized queries in a declarative way.
+
+Example:
+<verbatim>
+<process name="sample-process">
+...
+    <workflow engine="hive" path="/projects/bootcamp/hive-script.hql"/>
+...
+</process>
+</verbatim>
+
+This defines the workflow engine to be hive and the hive script is defined at
+/projects/bootcamp/hive-script.hql.
+
+Feeds with Hive table storage will send one more parameter apart from the general ones:
+<verbatim>$input_filter</verbatim>
 
 ---++++ Retry
 Retry policy defines how the workflow failures should be handled. Two retry policies are defined: backoff and exp-backoff(exponential backoff). Depending on the delay and number of attempts, the workflow is re-tried after specific intervals.
@@ -478,4 +718,6 @@ Example:
 ...
 </process>
 </verbatim>
-This late handling specifies that late data detection should run at feed's late cut-off which is 6 hours in this case. If there is late data, Falcon should run the workflow specified at /projects/bootcamp/workflow/lateinput1/workflow.xml
\ No newline at end of file
+This late handling specifies that late data detection should run at the feed's late cut-off, which is 6 hours in this case. If there is late data, Falcon should run the workflow specified at /projects/bootcamp/workflow/lateinput1/workflow.xml
+
+*Note:* This is currently only supported for FileSystem storage, not Table storage.

Modified: incubator/falcon/trunk/general/src/site/twiki/docs/FalconCLI.twiki
URL: http://svn.apache.org/viewvc/incubator/falcon/trunk/general/src/site/twiki/docs/FalconCLI.twiki?rev=1565098&r1=1565097&r2=1565098&view=diff
==============================================================================
--- incubator/falcon/trunk/general/src/site/twiki/docs/FalconCLI.twiki (original)
+++ incubator/falcon/trunk/general/src/site/twiki/docs/FalconCLI.twiki Thu Feb  6 07:37:58 2014
@@ -6,163 +6,82 @@ FalconCLI is a interface between user an
 
 ---+++Submit
 
-Entity submit action allows a new cluster/feed/process to be setup within Falcon. Submitted entity is not
-scheduled, meaning it would simply be in the configuration store within Falcon. Besides validating against
-the schema for the corresponding entity being added, the Falcon system would also perform inter-field
-validations within the configuration file and validations across dependent entities.
+The submit option is used to set up an entity definition.
 
-<verbatim>
 Example: 
 $FALCON_HOME/bin/falcon entity -submit -type cluster -file /cluster/definition.xml
-</verbatim>
 
 Note: The url option in the above and all subsequent commands is optional. If not mentioned, it will be picked up from the client.properties file. If the option is not provided and also not set in client.properties, Falcon CLI will fail.
 
 ---+++Schedule
 
-Feeds or Processes that are already submitted and present in the config store can be scheduled. Upon schedule,
-Falcon system wraps the required repeatable action as a bundle of oozie coordinators and executes them on the
-Oozie scheduler. (It is possible to extend Falcon to use an alternate workflow engine other than Oozie).
-Falcon overrides the workflow instance's external id in Oozie to reflect the process/feed and the nominal
-time. This external Id can then be used for instance management functions.
+Once submitted, an entity can be scheduled using the schedule option. Only processes and feeds can be scheduled.
 
-<verbatim>
 Usage:
 $FALCON_HOME/bin/falcon entity  -type [process|feed] -name <<name>> -schedule
 
 Example:
 $FALCON_HOME/bin/falcon entity  -type process -name sampleProcess -schedule
-</verbatim>
 
 ---+++Suspend
 
-This action is applicable only on scheduled entity. This triggers suspend on the oozie bundle that was
-scheduled earlier through the schedule function. No further instances are executed on a suspended process/feed.
+Suspend on an entity results in suspension of the oozie bundle that was scheduled earlier through the schedule function. No further instances are executed on a suspended entity. Only schedulable entities (process/feed) can be suspended.
 
-<verbatim>
 Usage:
 $FALCON_HOME/bin/falcon entity  -type [feed|process] -name <<name>> -suspend
-</verbatim>
 
 ---+++Resume
 
 Puts a suspended process/feed back to active, which in turn resumes the applicable oozie bundle.
 
-<verbatim>
 Usage:
  $FALCON_HOME/bin/falcon entity  -type [feed|process] -name <<name>> -resume
-</verbatim>
 
 ---+++Delete
 
-Delete operation on the entity removes any scheduled activity on the workflow engine, besides removing the
-entity from the falcon configuration store. Delete operation on an entity would only succeed if there are
-no dependent entities on the deleted entity.
+Delete removes the submitted entity definition for the specified entity and puts it into the archive.
 
-<verbatim>
 Usage:
 $FALCON_HOME/bin/falcon entity  -type [cluster|feed|process] -name <<name>> -delete
-</verbatim>
 
 ---+++List
 
-List all the entities within the falcon config store for the entity type being requested. This will include
-both scheduled and submitted entity configurations.
+Entities of a particular type can be listed with the list sub-command.
 
-<verbatim>
 Usage:
 $FALCON_HOME/bin/falcon entity -type [cluster|feed|process] -list
-</verbatim>
 
 ---+++Update
 
 Update operation allows an already submitted/scheduled entity to be updated. Cluster update is currently
-not allowed. Feed update can cause cascading update to all the processes already scheduled. The following
-set of actions are performed in Oozie to realize an update.
+not allowed.
 
-   * Suspend the previously scheduled Oozie coordinator. This is prevent any new action from being triggered.
-   * Update the coordinator to set the end time to "now"
-   * Resume the suspended coordiantors
-   * Schedule as per the new process/feed definition with the start time as "now"
-
-<verbatim>
 Usage:
-$FALCON_HOME/bin/falcon entity  -type [feed|process] -name <<name>> -update
-</verbatim>
+$FALCON_HOME/bin/falcon entity  -type [feed|process] -name <<name>> -update [-effective <<effective time>>]
 
 ---+++Status
 
 Status returns the current status of the entity.
 
-<verbatim>
 Usage:
 $FALCON_HOME/bin/falcon entity -type [cluster|feed|process] -name <<name>> -status
-</verbatim>
 
 ---+++Dependency
 
-Returns the dependencies of the requested entity. Dependency list include both forward and backward
-dependencies (depends on & is dependent on). For ex, a feed would show process that are dependent on the
-feed and the clusters that it depends on.'
+With the dependency option, we can list all the entities on which the specified entity depends. For example, for a feed, dependency returns the cluster name, and for a process it returns all the input feeds, output feeds and cluster names.
 
-<verbatim>
 Usage:
 $FALCON_HOME/bin/falcon entity -type [cluster|feed|process] -name <<name>> -dependency
-</verbatim>
 
 ---+++Definition
 
-Gets the current entity definition as stored in the configuration store. Please note that user documentations
-in the entity will not be retained.
+Definition option returns the entity definition submitted earlier during the submit step.
 
-<verbatim>
 Usage:
 $FALCON_HOME/bin/falcon entity -type [cluster|feed|process] -name <<name>> -definition
-</verbatim>
 
 ---++Instance Management Options
 
-Instance Manager gives user the option to control individual instances of the process based on their instance start time (start time of that instance). Start time needs to be given in standard TZ format. Example:   01 Jan 2012 01:00  => 2012-01-01T01:00Z
-
-All the instance management operations (except running) allow single instance or list of instance within a Date range to be acted on. Make sure the dates are valid. i.e are within the start and  end time of process itself.
-
-For every query in instance management the process name is a compulsory parameter.
-
-Parameters -start and -end are used to mention the date range within which you want the instance to be operated upon.
-
--start:   using only  "-start" without  "-end"  will conduct the desired operation only on single instance given by date along with start.
-
--end:  "-end"  can only be used along with "-start" . It corresponds to the end date till which instance need to operated upon.
-
-   * 1. *status*: -status option via CLI can be used to get the status of a single or multiple instances.  If the instance is not yet materialized but is within the process validity range, WAITING is returned as the state.Along with the status of the instance log location is also returned.
-
-
-   * 2.	*running*: -running returns all the running instance of the process. It does not take any start or end dates but simply return all the instances in state RUNNING at that given time.
-
-   * 3.	*rerun*: -rerun is the option that you will use most often from instance management. As the name suggest this option is used to rerun a particular instance or instances of the process. The rerun option reruns all parent workflow for the instance, which in turn rerun all the sub-workflows for it. This option is valid for any instance in terminal state, i.e. KILLED, SUCCEEDED, FAILED. User can also set properties in the request, which will give options what types of actions should be rerun like, only failed, run all etc. These properties are dependent on the workflow engine being used along with falcon.
-
-   * 4. *suspend*: -suspend is used to suspend a instance or instances  for the given process. This option pauses the parent workflow at the state, which it was in at the time of execution of this command. This command is similar to SUSPEND process command in functionality only difference being, SUSPEND process suspends all the instance whereas suspend instance suspend only that instance or instances in the range.
-
-   * 5.	*resume*: -resume option is used to resume any instance that  is in suspended state.  (Note: due to a bug in oozie, -resume option in some cases may not actually resume the suspended instance/ instances)
-   * 6. *kill*: -kill option can be used to kill an instance or multiple instances
-
-
-In all the cases where your request is syntactically correct but logically not, the instance / instances are returned with the same status as earlier. Example:  trying to resume a KILLED  / SUCCEEDED instance will return the instance with KILLED / SUCCEEDED, without actually performing any operation. This is so because only an instance in SUSPENDED state can be resumed. Same thing is valid for rerun a SUSPENDED or RUNNING options etc.
-
----+++Status
-
-Status option via CLI can be used to get the status of a single or multiple instances.  If the instance is not yet materialized but is within the process validity range, WAITING is returned as the state. Along with the status of the instance time is also returned. Log location gives the oozie workflow url
-If the instance is in WAITING state, missing dependencies are listed
-
-Example : Suppose a process has 3 instance, one has succeeded,one is in running state and other one is waiting, the expected output is:
-
-{"status":"SUCCEEDED","message":"getStatus is successful","instances":[{"instance":"2012-05-07T05:02Z","status":"SUCCEEDED","logFile":"http://oozie-dashboard-url"},{"instance":"2012-05-07T05:07Z","status":"RUNNING","logFile":"http://oozie-dashboard-url"}, {"instance":"2010-01-02T11:05Z","status":"WAITING"}]
-
-<verbatim>
-Usage:
-$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -status -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"
-</verbatim>
-
 ---+++Kill
 
 Kill sub-command is used to kill all the instances of the specified process whose nominal time is between the given start time and end time.
@@ -175,79 +94,86 @@ Example:   01 Jan 2012 01:00  => 2012-01
 
 3. Process name is a compulsory parameter for each instance management command.
 
-<verbatim>
 Usage:
 $FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -kill -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"
-</verbatim>
 
 ---+++Suspend
 
 Suspend is used to suspend an instance or instances for the given process. This option pauses the parent workflow at the state it was in at the time of execution of this command.
 
-<verbatim>
 Usage:
 $FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -suspend -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"
-</verbatim>
 
 ---+++Continue
 
 Continue option is used to continue a failed workflow instance. This option is valid only for process instances in terminal state, i.e. SUCCEEDED, KILLED or FAILED.
 
-<verbatim>
 Usage:
 $FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -continue -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"
-</verbatim>
 
 ---+++Rerun
 
 Rerun option is used to rerun instances of a given process. This option is valid only for process instances in terminal state, i.e. SUCCEEDED, KILLED or FAILED. Optionally, you can specify the properties to override.
 
-<verbatim>
 Usage:
 $FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -re-run -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'" [-file <<properties file>>]
-</verbatim>
 
 ---+++Resume
 
 Resume option is used to resume any instance that  is in suspended state.
 
-<verbatim>
 Usage:
 $FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -resume -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"
-</verbatim>
+
+---+++Status
+
+Status option via CLI can be used to get the status of a single or multiple instances. If the instance is not yet materialized but is within the process validity range, WAITING is returned as the state. Along with the status, the instance time is also returned. The log location gives the oozie workflow url.
+If the instance is in WAITING state, missing dependencies are listed.
+
+Example: Suppose a process has 3 instances; one has succeeded, one is in running state and the other one is waiting. The expected output is:
+
+{"status":"SUCCEEDED","message":"getStatus is successful","instances":[{"instance":"2012-05-07T05:02Z","status":"SUCCEEDED","logFile":"http://oozie-dashboard-url"},{"instance":"2012-05-07T05:07Z","status":"RUNNING","logFile":"http://oozie-dashboard-url"},{"instance":"2010-01-02T11:05Z","status":"WAITING"}]}
+
+Usage:
+$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -status -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"
+
+---+++Summary
+
+Summary option via CLI can be used to get the consolidated status of the instances between the specified time period.
+Each status, along with the corresponding instance count, is listed for each of the applicable colos.
+The unscheduled instances between the specified time period are included as UNSCHEDULED in the output to provide more clarity.
+
+Example: Suppose a process has 3 instances; one has succeeded, one is in running state and the other one is waiting. The expected output is:
+
+{"status":"SUCCEEDED","message":"getSummary is successful", "cluster": <<name>> [{"SUCCEEDED":"1"}, {"WAITING":"1"}, {"RUNNING":"1"}]}
+
+Usage:
+$FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -summary -start "yyyy-MM-dd'T'HH:mm'Z'" -end "yyyy-MM-dd'T'HH:mm'Z'"
 
 ---+++Running
 
 Running option provides all the running instances of the mentioned process.
 
-<verbatim>
 Usage:
 $FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -running
-</verbatim>
 
 ---+++Logs
 
 Get logs for instance actions
 
-<verbatim>
 Usage:
 $FALCON_HOME/bin/falcon instance -type <<feed/process>> -name <<name>> -logs -start "yyyy-MM-dd'T'HH:mm'Z'" [-end "yyyy-MM-dd'T'HH:mm'Z'"] [-runid <<runid>>]
-</verbatim>
+
 
 ---++Admin Options
 
 ---+++Help
 
-<verbatim>
 Usage:
 $FALCON_HOME/bin/falcon admin -help
-</verbatim>
 
 ---+++Version
 
 Version returns the current version of Falcon installed.
-
-<verbatim>
 Usage:
 $FALCON_HOME/bin/falcon admin -version
-</verbatim>
\ No newline at end of file

Added: incubator/falcon/trunk/general/src/site/twiki/docs/HiveIntegration.twiki
URL: http://svn.apache.org/viewvc/incubator/falcon/trunk/general/src/site/twiki/docs/HiveIntegration.twiki?rev=1565098&view=auto
==============================================================================
--- incubator/falcon/trunk/general/src/site/twiki/docs/HiveIntegration.twiki (added)
+++ incubator/falcon/trunk/general/src/site/twiki/docs/HiveIntegration.twiki Thu Feb  6 07:37:58 2014
@@ -0,0 +1,342 @@
+---+ Hive Integration
+
+---++ Overview
+Falcon provides data management functions for feeds declaratively. It allows users to represent feed locations as
+time-based partition directories on HDFS containing files.
+
+Hive provides a simple and familiar database-like tabular model of data management to its users,
+which is backed by HDFS. It supports two classes of tables, managed tables and external tables.
+
+Falcon allows users to represent feed locations as Hive tables. Falcon supports both managed and external tables
+and provides data management services for tables such as replication, eviction, archival, etc. Falcon notifies
+HCatalog as a side effect of acquiring, replicating or evicting a data set instance, adding the
+missing capability of HCatalog table replication.
+
+In the near future, Falcon will allow users to express pipeline processing in Hive scripts
+apart from Pig and Oozie workflows.
+
+
+---++ Assumptions
+   * Date is a mandatory first-level partition for Hive tables
+      * Data availability triggers are based on date pattern in Oozie
+   * Tables must be created in Hive prior to adding them as a feed in Falcon.
+      * Duplicating this in Falcon would create confusion about the real source of truth. Also, propagating schema changes
+    between systems is a hard problem.
+   * Falcon does not know about the encoding of the data; data should be in an HCatalog-supported format.
+
+---++ Configuration
+Falcon provides a system level option to enable Hive integration. Falcon must be configured with an implementation
+for the catalog registry. The default implementation for Hive is shipped with Falcon.
+
+<verbatim>
+catalog.service.impl=org.apache.falcon.catalog.HiveCatalogService
+</verbatim>
+
+
+---++ Incompatible changes
+Falcon depends heavily on data-availability triggers for scheduling Falcon workflows. Oozie must support
+data-availability triggers based on HCatalog partition availability. This is only available in oozie 4.x.
+
+Hence, Falcon for Hive support needs Oozie 4.x.
+
+
+---++ Oozie Shared Library setup
+Falcon post Hive integration depends heavily on the [[http://oozie.apache.org/docs/4.0.0/WorkflowFunctionalSpec.html#a17_HDFS_Share_Libraries_for_Workflow_Applications_since_Oozie_2.3][shared library feature of Oozie]].
+Since the jars for HCatalog, Pig and Hive number in the many tens, it's quite daunting to
+redistribute the dependent jars from Falcon.
+
+[[http://oozie.apache.org/docs/4.0.0/DG_QuickStart.html#Oozie_Share_Lib_Installation][This is a one time effort in Oozie setup and is quite straightforward.]]
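+
+A minimal sketch of the one-time sharelib install for Oozie 4.x (the filesystem URI is illustrative):
+<verbatim>
+# create and upload the Oozie shared library to HDFS
+$OOZIE_HOME/bin/oozie-setup.sh sharelib create -fs hdfs://namenode:8020
+</verbatim>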
+
+
+---++ Approach
+
+---+++ Entity Changes
+
+   * Cluster DSL will have an additional registry-interface section, specifying the endpoint for the
+HCatalog server. If this is absent, no HCatalog publication will be done from Falcon for this cluster.
+      <verbatim>thrift://hcatalog-server:port</verbatim>
+   * Feed DSL will allow users to specify the URI (location) for HCatalog tables as:
+      <verbatim>catalog:database_name:table_name#partitions(key=value?)*</verbatim>
+   * Failure to publish to HCatalog will be retried (configurable # of retries) with backoff. Permanent failures
+   after all the retries are exhausted will fail the Falcon workflow.
+
+---+++ Eviction
+
+   * Falcon will construct DDL statements to filter candidate partitions eligible for eviction
+   * Falcon will construct DDL statements to drop the eligible partitions
+   * Additionally, Falcon will nuke the data on HDFS for external tables
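+
+A hedged illustration of the kind of DDL involved (database, table and partition values are hypothetical):
+<verbatim>
+USE falcon_db;
+-- inspect candidate partitions for the feed's table
+SHOW PARTITIONS clicks;
+-- drop a partition that has passed its retention limit
+ALTER TABLE clicks DROP PARTITION (ds='2012-04-14-00');
+</verbatim>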
+
+
+---+++ Replication
+
+   * Falcon will use HCatalog (Hive) API to export the data for a given table and the partition,
+which will result in a data collection that includes metadata on the data's storage format, the schema,
+how the data is sorted, what table the data came from, and values of any partition keys from that table.
+   * Falcon will use DistCp tool to copy the exported data collection into the secondary cluster into a staging
+directory used by Falcon.
+   * Falcon will then import the data into HCatalog (Hive) using the HCatalog (Hive) API. If the specified table does
+not yet exist, Falcon will create it, using the information in the imported metadata to set defaults for the
+table such as schema, storage format, etc.
+   * The partition is not complete and hence not visible to users until all the data is committed on the secondary
+cluster (no dirty reads).
+   * The data collection is staged by Falcon and retries for the copy continue from where they left off.
+   * Failure to register with Hive will be retried. After all the attempts are exhausted,
+the data will be cleaned up by Falcon.
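+
+A minimal sketch of the underlying export/import flow in Hive terms (table, partition and staging
+paths are illustrative):
+<verbatim>
+-- on the primary cluster: export the partition's data plus its metadata to a staging directory
+USE falcon_db;
+EXPORT TABLE clicks PARTITION (ds='2012-04-21-00') TO '/apps/falcon/staging/clicks/2012-04-21-00';
+
+-- DistCp copies the staging directory to the secondary cluster, then on the secondary:
+IMPORT TABLE clicks PARTITION (ds='2012-04-21-00') FROM '/apps/falcon/staging/clicks/2012-04-21-00';
+</verbatim>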
+
+
+---+++ Security
+The user owns all data managed by Falcon. Falcon runs as the user who submitted the feed and will authenticate
+with HCatalog as the end user who owns the entity and the data.
+
+For Hive managed tables, the table may be owned by the end user or "hive". For "hive"-owned tables,
+the user will have to configure the feed's owner as "hive" (see the example below).
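+
+For example, a feed over a "hive"-owned table would carry an ACL along these lines (the ACL element is the same one
+used in the feed examples below):
+<verbatim>
+<ACL owner="hive" group="users" permission="0755"/>
+</verbatim>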
+
+
+---++ Load on HCatalog from Falcon
+The load on HCatalog generally depends on the frequency of the feeds configured in Falcon and on how often data is
+ingested, replicated, or processed.
+
+
+---++ User Impact
+   * There should not be any impact to users due to this integration
+   * Falcon will be fully backwards compatible
+   * Users have a choice to either keep storage based on files on HDFS, as they do today, or use HCatalog for
+accessing the data in tables
+
+
+---++ Known Limitations
+
+---+++ Oozie
+
+   * Falcon with Hadoop 1.x requires copying the guava jars manually to the sharelib in Oozie. Hadoop 2.x ships them.
+   * hcatalog-pig-adapter needs to be copied manually to the Oozie sharelib, for example (a similar command for the
+   guava jars follows):
+<verbatim>
+bin/hadoop dfs -copyFromLocal $LFS/share/lib/hcatalog/hcatalog-pig-adapter-0.5.0-incubating.jar share/lib/hcatalog
+</verbatim>
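+
+The guava jars for Hadoop 1.x can be copied along the same lines (the jar version and target directory below are
+illustrative assumptions, not exact values):
+<verbatim>
+bin/hadoop dfs -copyFromLocal $LFS/share/lib/hcatalog/guava-<version>.jar share/lib/hcatalog
+</verbatim>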
+
+---+++ Hive
+
+   * [[https://issues.apache.org/jira/browse/HIVE-5550][Hive table import fails for tables created with default text and sequence file formats using HCatalog API]]
+For some arcane reason, Hive substitutes the output format for text and sequence files with a Hive-prefixed one.
+Hive table import fails since it compares the input and output formats of the source table against the target's, and
+they are different. Say a table was created without specifying the file format; it defaults to:
+<verbatim>
+fileFormat=TextFile, inputformat=org.apache.hadoop.mapred.TextInputFormat, outputformat=org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+</verbatim>
+
+But when Hive fetches the table from the metastore, it replaces the output format with
+org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, and the comparison between the source and target table fails.
+<verbatim>
+org.apache.hadoop.hive.ql.parse.ImportSemanticAnalyzer#checkTable
+      // check IF/OF/Serde
+      String existingifc = table.getInputFormatClass().getName();
+      String importedifc = tableDesc.getInputFormat();
+      String existingofc = table.getOutputFormatClass().getName();
+      String importedofc = tableDesc.getOutputFormat();
+      if ((!existingifc.equals(importedifc))
+          || (!existingofc.equals(importedofc))) {
+        throw new SemanticException(
+            ErrorMsg.INCOMPATIBLE_SCHEMA
+                .getMsg(" Table inputformat/outputformats do not match"));
+      }
+</verbatim>
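+
+A possible workaround (a sketch, not an officially documented fix) is to create the source table with the input and
+output formats spelled out explicitly, so that both sides of the comparison see the Hive-prefixed output format:
+<verbatim>
+CREATE TABLE customer_raw (id INT, value STRING)
+PARTITIONED BY (ds STRING)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
+STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'
+OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat';
+</verbatim>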
+
+
+---++ Hive Examples
+The following are example entity configurations for lifecycle management of tables in Hive.
+
+---+++ Hive Table Lifecycle Management - Replication and Retention
+
+---++++ Primary Cluster
+
+<verbatim>
+<?xml version="1.0"?>
+<!--
+    Primary cluster configuration for demo vm
+  -->
+<cluster colo="west-coast" description="Primary Cluster"
+         name="primary-cluster"
+         xmlns="uri:falcon:cluster:0.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+    <interfaces>
+        <interface type="readonly" endpoint="hftp://localhost:10070"
+                   version="1.1.1" />
+        <interface type="write" endpoint="hdfs://localhost:10020"
+                   version="1.1.1" />
+        <interface type="execute" endpoint="localhost:10300"
+                   version="1.1.1" />
+        <interface type="workflow" endpoint="http://localhost:11010/oozie/"
+                   version="3.3.0" />
+        <interface type="registry" endpoint="thrift://localhost:19083"
+                   version="0.11.0" />
+        <interface type="messaging" endpoint="tcp://localhost:61616?daemon=true"
+                   version="5.4.3" />
+    </interfaces>
+    <locations>
+        <location name="staging" path="/apps/falcon/staging" />
+        <location name="temp" path="/tmp" />
+        <location name="working" path="/apps/falcon/working" />
+    </locations>
+</cluster>
+</verbatim>
+
+---++++ BCP Cluster
+
+<verbatim>
+<?xml version="1.0"?>
+<!--
+    BCP cluster configuration for demo vm
+  -->
+<cluster colo="east-coast" description="BCP Cluster"
+         name="bcp-cluster"
+         xmlns="uri:falcon:cluster:0.1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+    <interfaces>
+        <interface type="readonly" endpoint="hftp://localhost:20070"
+                   version="1.1.1" />
+        <interface type="write" endpoint="hdfs://localhost:20020"
+                   version="1.1.1" />
+        <interface type="execute" endpoint="localhost:20300"
+                   version="1.1.1" />
+        <interface type="workflow" endpoint="http://localhost:11020/oozie/"
+                   version="3.3.0" />
+        <interface type="registry" endpoint="thrift://localhost:29083"
+                   version="0.11.0" />
+        <interface type="messaging" endpoint="tcp://localhost:61616?daemon=true"
+                   version="5.4.3" />
+    </interfaces>
+    <locations>
+        <location name="staging" path="/apps/falcon/staging" />
+        <location name="temp" path="/tmp" />
+        <location name="working" path="/apps/falcon/working" />
+    </locations>
+</cluster>
+</verbatim>
+
+---++++ Feed with replication and eviction policy
+
+<verbatim>
+<?xml version="1.0"?>
+<!--
+    Replicating Hourly customer table from primary to secondary cluster.
+  -->
+<feed description="Replicating customer table feed" name="customer-table-replicating-feed"
+      xmlns="uri:falcon:feed:0.1">
+    <frequency>hours(1)</frequency>
+    <timezone>UTC</timezone>
+
+    <clusters>
+        <cluster name="primary-cluster" type="source">
+            <validity start="2013-09-24T00:00Z" end="2013-10-26T00:00Z"/>
+            <retention limit="hours(2)" action="delete"/>
+        </cluster>
+        <cluster name="bcp-cluster" type="target">
+            <validity start="2013-09-24T00:00Z" end="2013-10-26T00:00Z"/>
+            <retention limit="days(30)" action="delete"/>
+
+            <table uri="catalog:tgt_demo_db:customer_bcp#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
+        </cluster>
+    </clusters>
+
+    <table uri="catalog:src_demo_db:customer_raw#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
+
+    <ACL owner="seetharam" group="users" permission="0755"/>
+    <schema location="" provider="hcatalog"/>
+</feed>
+</verbatim>
+
+
+---+++ Hive Table used in Processing Pipelines
+
+---++++ Primary Cluster
+The cluster definition from the lifecycle example can be used.
+
+---++++ Input Feed
+
+<verbatim>
+<?xml version="1.0"?>
+<feed description="clicks log table " name="input-table" xmlns="uri:falcon:feed:0.1">
+    <groups>online,bi</groups>
+    <frequency>hours(1)</frequency>
+    <timezone>UTC</timezone>
+
+    <clusters>
+        <cluster name="##cluster##" type="source">
+            <validity start="2010-01-01T00:00Z" end="2012-04-21T00:00Z"/>
+            <retention limit="hours(24)" action="delete"/>
+        </cluster>
+    </clusters>
+
+    <table uri="catalog:falcon_db:input_table#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
+
+    <ACL owner="testuser" group="group" permission="0x755"/>
+    <schema location="/schema/clicks" provider="protobuf"/>
+</feed>
+</verbatim>
+
+
+---++++ Output Feed
+
+<verbatim>
+<?xml version="1.0"?>
+<feed description="clicks log identity table" name="output-table" xmlns="uri:falcon:feed:0.1">
+    <groups>online,bi</groups>
+    <frequency>hours(1)</frequency>
+    <timezone>UTC</timezone>
+
+    <clusters>
+        <cluster name="##cluster##" type="source">
+            <validity start="2010-01-01T00:00Z" end="2012-04-21T00:00Z"/>
+            <retention limit="hours(24)" action="delete"/>
+        </cluster>
+    </clusters>
+
+    <table uri="catalog:falcon_db:output_table#ds=${YEAR}-${MONTH}-${DAY}-${HOUR}" />
+
+    <ACL owner="testuser" group="group" permission="0x755"/>
+    <schema location="/schema/clicks" provider="protobuf"/>
+</feed>
+</verbatim>
+
+
+---++++ Process
+
+<verbatim>
+<?xml version="1.0"?>
+<process name="##processName##" xmlns="uri:falcon:process:0.1">
+    <clusters>
+        <cluster name="##cluster##">
+            <validity end="2012-04-22T00:00Z" start="2012-04-21T00:00Z"/>
+        </cluster>
+    </clusters>
+
+    <parallel>1</parallel>
+    <order>FIFO</order>
+    <frequency>days(1)</frequency>
+    <timezone>UTC</timezone>
+
+    <inputs>
+        <input end="today(0,0)" start="today(0,0)" feed="input-table" name="input"/>
+    </inputs>
+
+    <outputs>
+        <output instance="now(0,0)" feed="output-table" name="output"/>
+    </outputs>
+
+    <properties>
+        <property name="blah" value="blah"/>
+    </properties>
+
+    <workflow engine="pig" path="/falcon/test/apps/pig/table-id.pig"/>
+
+    <retry policy="periodic" delay="minutes(10)" attempts="3"/>
+</process>
+</verbatim>
+
+
+---++++ Pig Script
+
+<verbatim>
+A = load '$input_database.$input_table' using org.apache.hcatalog.pig.HCatLoader();
+B = FILTER A BY $input_filter;
+C = foreach B generate id, value;
+store C into '$output_database.$output_table' USING org.apache.hcatalog.pig.HCatStorer('$output_dataout_partitions');
+</verbatim>
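+
+The $input_* and $output_* variables above are parameters expected to be supplied at runtime; with table-backed feeds,
+Falcon makes the database, table and partition-filter values of each input and output available to the workflow (the
+exact parameter names here follow this example and are illustrative).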

Modified: incubator/falcon/trunk/general/src/site/twiki/docs/InstallationSteps.twiki
URL: http://svn.apache.org/viewvc/incubator/falcon/trunk/general/src/site/twiki/docs/InstallationSteps.twiki?rev=1565098&r1=1565097&r2=1565098&view=diff
==============================================================================
--- incubator/falcon/trunk/general/src/site/twiki/docs/InstallationSteps.twiki (original)
+++ incubator/falcon/trunk/general/src/site/twiki/docs/InstallationSteps.twiki Thu Feb  6 07:37:58 2014
@@ -8,10 +8,11 @@ git clone https://git-wip-us.apache.org/
 
 cd falcon
 
-export MAVEN_OPTS="-Xmx1024m -XX:MaxPermSize=256m" && mvn clean install
+export MAVEN_OPTS="-Xmx1024m -XX:MaxPermSize=256m" && mvn clean install [For hadoop 1]
+export MAVEN_OPTS="-Xmx1024m -XX:MaxPermSize=256m" && mvn clean install -Phadoop-2 [For hadoop 2]
 
 [optionally -Dhadoop.version=<<hadoop.version>> can be appended to build for a specific version of hadoop]
-[Falcon has currently not been tested with secure Hadoop / Hadoop 2.0]
+[optionally -Doozie.version=<<oozie version>> can be appended to build with a specific version of oozie. Oozie versions >= 3.2.0-incubating are supported]
 
 </verbatim>
 
@@ -20,7 +21,8 @@ Once the build successfully completes, a
 *Embedded Mode*
 <verbatim>
 
-mvn clean assembly:assembly -DskipTests -DskipCheck=true
+mvn clean assembly:assembly -DskipTests -DskipCheck=true [For hadoop 1]
+mvn clean assembly:assembly -DskipTests -DskipCheck=true -P hadoop-2 [For hadoop 2]
 
 </verbatim>
 
@@ -34,46 +36,72 @@ Tar is structured as follows
    |- falcon
    |- falcon-start
    |- falcon-stop
+   |- falcon-config.sh
+   |- service-start.sh
+   |- service-stop.sh
 |- conf
    |- startup.properties
    |- runtime.properties
    |- client.properties
    |- log4j.xml
-|- src
+   |- falcon-env.sh
 |- docs
-|- apidocs
 |- client
    |- lib (client support libs)
 |- server
    |- webapp
       |- falcon.war
-|- logs (application log files & temp data files)
-   |- falcon.pid
-   
+|- hadooplibs
+|- README
+|- NOTICE.txt
+|- LICENSE.txt
+|- DISCLAIMER.txt
+|- CHANGES.txt
 </verbatim>
 
 *Distributed Mode*
 
 <verbatim>
 
-mvn clean assembly:assembly -DskipTests -DskipCheck=true -P distributed
+mvn clean assembly:assembly -DskipTests -DskipCheck=true -Pdistributed,hadoop-1 [For hadoop 1]
+mvn clean assembly:assembly -DskipTests -DskipCheck=true -Pdistributed,hadoop-2 [For hadoop 2]
 
 </verbatim>
 
-This generates 3 tars - {project dir}/target/falcon-${project.version}-prism.tar.gz, {project dir}/target/falcon-${project.version}-server.tar.gz and {project dir}/target/falcon-${project.version}-client.tar.gz
+Tar can be found in {project dir}/target/falcon-distributed-${project.version}-server.tar.gz
+
+Tar is structured as follows
 
-Package structure of prism and server tars is same as that of the embedded package with one difference that prism package contains prism war instead of falcon war. Client package is structured as follows 
- 
 <verbatim>
 
 |- bin
    |- falcon
+   |- falcon-start
+   |- falcon-stop
+   |- falcon-config.sh
+   |- service-start.sh
+   |- service-stop.sh
+   |- prism-stop
+   |- prism-start
 |- conf
+   |- startup.properties
+   |- runtime.properties
    |- client.properties
    |- log4j.xml
+   |- falcon-env.sh
+|- docs
 |- client
    |- lib (client support libs)
-   
+|- server
+   |- webapp
+      |- falcon.war
+      |- prism.war
+|- hadooplibs
+|- README
+|- NOTICE.txt
+|- LICENSE.txt
+|- DISCLAIMER.txt
+|- CHANGES.txt
 </verbatim>
 
 ---+++ Installing & running Falcon
@@ -81,7 +109,55 @@ Package structure of prism and server ta
 *Installing falcon*
 <verbatim>
 tar -xzvf {falcon package}
-cd falcon-server-${project.version} or cd falcon-prism-${project.version}
+cd falcon-distributed-${project.version} or falcon-${project.version}
+</verbatim>
+
+*Configuring Falcon*
+
+By default, the config directory used by falcon is {package dir}/conf. To override this, set the environment variable FALCON_CONF to the path of the conf dir.
+
+falcon-env.sh has been added to the falcon conf. This file can be used to set various environment variables that you need for your services.
+In addition, you can set any other environment variables you might need. This file will be sourced by falcon scripts before any commands are executed. The following environment variables are available to set.
+
+<verbatim>
+# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path
+#export JAVA_HOME=
+
+# any additional java opts you want to set. This will apply to both client and server operations
+#export FALCON_OPTS=
+
+# any additional java opts that you want to set for client only
+#export FALCON_CLIENT_OPTS=
+
+# java heap size we want to set for the client. Default is 1024MB
+#export FALCON_CLIENT_HEAP=
+
+# any additional opts you want to set for prism service.
+#export FALCON_PRISM_OPTS=
+
+# java heap size we want to set for the prism service. Default is 1024MB
+#export FALCON_PRISM_HEAP=
+
+# any additional opts you want to set for falcon service.
+#export FALCON_SERVER_OPTS=
+
+# java heap size we want to set for the falcon server. Default is 1024MB
+#export FALCON_SERVER_HEAP=
+
+# What is considered as falcon home dir. Default is the base location of the installed software
+#export FALCON_HOME_DIR=
+
+# Where log files are stored. Default is logs directory under the base install location
+#export FALCON_LOG_DIR=
+
+# Where pid files are stored. Default is logs directory under the base install location
+#export FALCON_PID_DIR=
+
+# where the falcon active mq data is stored. Default is logs/data directory under the base install location
+#export FALCON_DATA_DIR=
+
+# Where do you want to expand the war file. By default it is in /server/webapp dir under the base install dir.
+#export FALCON_EXPANDED_WEBAPP_DIR=
 </verbatim>
 
 *Starting Falcon Server*
@@ -90,12 +166,23 @@ bin/falcon-start [-port <port>]
 </verbatim>
 
 By default, 
-- falcon server starts at port 15000. To change the port, use -port option
-- falcon server starts embedded active mq. To control this behaviour, set the following system properties using -D option in environment variable FALCON_OPTS:
-  - falcon.embeddedmq=<true/false> - Should server start embedded active mq, default true
-  - falcon.emeddedmq.port=<port> - Port for embedded active mq, default 61616
-  - falcon.embeddedmq.data=<path> - Data path for embedded active mq, default {package dir}/logs/data
-- falcon server starts with conf from {package dir}/conf. To override this (to use the same conf with multiple falcon upgrades), set environment variable FALCON_CONF to the path of conf dir
+* falcon server starts at port 15000. To change the port, use -port option
+* falcon server starts embedded active mq. To control this behaviour, set the following system properties using -D option in environment variable FALCON_OPTS:
+   * falcon.embeddedmq=<true/false> - Should server start embedded active mq, default true
+   * falcon.embeddedmq.port=<port> - Port for embedded active mq, default 61616
+   * falcon.embeddedmq.data=<path> - Data path for embedded active mq, default {package dir}/logs/data
+* falcon server starts with conf from {package dir}/conf. To override this (to use the same conf with multiple falcon upgrades), set environment variable FALCON_CONF to the path of conf dir
+
+__Adding Extension Libraries__
+Library extensions allow users to add custom libraries to entity lifecycles such as feed retention, feed replication and process execution. This is useful for use cases such as adding filesystem extensions. To enable this, add the following configs to startup.properties:
+*.libext.paths=<paths to be added to all entity lifecycles>
+*.libext.feed.paths=<paths to be added to all feed lifecycles>
+*.libext.feed.retentions.paths=<paths to be added to feed retention workflow>
+*.libext.feed.replication.paths=<paths to be added to feed replication workflow>
+*.libext.process.paths=<paths to be added to process workflow>
+
+The configured jars are added to the falcon classpath and to the corresponding workflows.
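+
+For example, a hypothetical entry adding a custom directory of jars to all entity lifecycles (the HDFS path is
+illustrative):
+*.libext.paths=/projects/falcon/libext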
+
 
 *Starting Prism*
 <verbatim>
@@ -103,8 +190,8 @@ bin/prism-start [-port <port>]
 </verbatim>
 
 By default, 
-- falcon server starts at port 16000. To change the port, use -port option
-- prism starts with conf from {package dir}/conf. To override this (to use the same conf with multiple prism upgrades), set environment variable FALCON_CONF to the path of conf dir
+* prism starts at port 16000. To change the port, use -port option
+* prism starts with conf from {package dir}/conf. To override this (to use the same conf with multiple prism upgrades), set environment variable FALCON_CONF to the path of conf dir
 
 *Using Falcon*
 <verbatim>
@@ -117,6 +204,10 @@ bin/falcon help
 (for more details about falcon cli usage)
 </verbatim>
 
+*Dashboard*
+
+Once falcon / prism is started, you can view the status of falcon entities using the Web-based dashboard. The web UI works in both distributed and embedded mode. You can open your browser at the corresponding port to use the web UI.
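+For example, with the default ports above, the falcon server UI is at http://localhost:15000/ and the prism UI at http://localhost:16000/.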
+
 *Stopping Falcon Server*
 <verbatim>
 bin/falcon-stop
@@ -127,12 +218,46 @@ bin/falcon-stop
 bin/prism-stop
 </verbatim>
 
----+++ Preparing oozie bundle for use with Falcon
+---+++ Preparing Oozie and Falcon packages for deployment
 <verbatim>
 cd <<project home>>
-mkdir target/package
-src/bin/pacakge.sh <<hadoop-version>>
+src/bin/package.sh <<hadoop-version>> <<oozie-version>>
+
+>> ex. src/bin/package.sh 1.1.2 3.1.3-incubating or src/bin/package.sh 0.20.2-cdh3u5 4.0.0
+>> Falcon package is available in <<falcon home>>/target/falcon-<<version>>-bin.tar.gz
+>> Oozie package is available in <<falcon home>>/target/oozie-3.3.2-distro.tar.gz
+</verbatim>
+
+---+++ Running Examples using embedded package
+<verbatim>
+bin/falcon-start
+</verbatim>
+Make sure the hadoop and oozie endpoints in examples/entity/standalone-cluster.xml match your setup
+<verbatim>
+bin/falcon entity -submit -type cluster -file examples/entity/standalone-cluster.xml
+</verbatim>
+Submit input and output feeds:
+<verbatim>
+bin/falcon entity -submit -type feed -file examples/entity/in-feed.xml
+bin/falcon entity -submit -type feed -file examples/entity/out-feed.xml
+</verbatim>
+Set-up workflow for the process:
+<verbatim>
+hadoop fs -put examples/app /
+</verbatim>
+Submit and schedule the process:
+<verbatim>
+bin/falcon entity -submitAndSchedule -type process -file examples/entity/oozie-mr-process.xml
+bin/falcon entity -submitAndSchedule -type process -file examples/entity/pig-process.xml
+</verbatim>
+Generate input data:
+<verbatim>
+examples/data/generate.sh <<hdfs endpoint>>
+</verbatim>
+Get status of instances:
+<verbatim>
+bin/falcon instance -status -type process -name oozie-mr-process -start 2013-11-15T00:05Z -end 2013-11-15T01:00Z
+</verbatim>
+
+
 
->> ex. src/bin/pacakge.sh 1.1.2 or src/bin/pacakge.sh 0.20.2-cdh3u5
->> oozie bundle available in target/package/oozie-3.2.0-incubating/distro/target/oozie-3.2.2-distro.tar.gz
-</verbatim>
\ No newline at end of file

Modified: incubator/falcon/trunk/general/src/site/twiki/docs/OnBoarding.twiki
URL: http://svn.apache.org/viewvc/incubator/falcon/trunk/general/src/site/twiki/docs/OnBoarding.twiki?rev=1565098&r1=1565097&r2=1565098&view=diff
==============================================================================
--- incubator/falcon/trunk/general/src/site/twiki/docs/OnBoarding.twiki (original)
+++ incubator/falcon/trunk/general/src/site/twiki/docs/OnBoarding.twiki Thu Feb  6 07:37:58 2014
@@ -1,6 +1,7 @@
 ---++ Contents
    * <a href="#Onboarding Steps">Onboarding Steps</a>
    * <a href="#Sample Pipeline">Sample Pipeline</a>
+   * [[HiveIntegration][Hive Examples]]
 
 ---+++ Onboarding Steps
    * Create cluster definition for the cluster, specifying name node, job tracker, workflow engine endpoint, messaging endpoint. Refer to [[EntitySpecification][cluster definition]] for details.

Modified: incubator/falcon/trunk/general/src/site/twiki/docs/restapi/AdminVersion.twiki
URL: http://svn.apache.org/viewvc/incubator/falcon/trunk/general/src/site/twiki/docs/restapi/AdminVersion.twiki?rev=1565098&r1=1565097&r2=1565098&view=diff
==============================================================================
--- incubator/falcon/trunk/general/src/site/twiki/docs/restapi/AdminVersion.twiki (original)
+++ incubator/falcon/trunk/general/src/site/twiki/docs/restapi/AdminVersion.twiki Thu Feb  6 07:37:58 2014
@@ -21,7 +21,15 @@ Remote-User: rgautam
 ---+++ Result
 <verbatim>
 {
-    Version:"0.4-incubating-SNAPSHOT-rc0f2701549628f2f97746bd024518512c07d5442",
-    Mode:"embedded"
+    "properties":[
+        {
+            "key":"Version",
+            "value":"0.4-incubating-SNAPSHOT-rb47788d1112fcf949c22a3860934167237b395b0"
+        },
+        {
+            "key":"Mode",
+            "value":"embedded"
+        }
+    ]
 }
 </verbatim>

Modified: incubator/falcon/trunk/general/src/site/twiki/docs/restapi/EntityList.twiki
URL: http://svn.apache.org/viewvc/incubator/falcon/trunk/general/src/site/twiki/docs/restapi/EntityList.twiki?rev=1565098&r1=1565097&r2=1565098&view=diff
==============================================================================
--- incubator/falcon/trunk/general/src/site/twiki/docs/restapi/EntityList.twiki (original)
+++ incubator/falcon/trunk/general/src/site/twiki/docs/restapi/EntityList.twiki Thu Feb  6 07:37:58 2014
@@ -1,4 +1,4 @@
----++  GET /api/entities/list/:entity-type
+---++  GET /api/entities/list/:entity-type?fields=:fields
    * <a href="#Description">Description</a>
    * <a href="#Parameters">Parameters</a>
    * <a href="#Results">Results</a>
@@ -9,6 +9,8 @@ Get list of the entities.
 
 ---++ Parameters
    * :entity-type can be cluster, feed or process.
+   * :fields (optional) additional fields that the client is interested in, separated by commas.
+     Currently falcon only supports status as a valid field.
 
 ---++ Results
 List of the entities.
@@ -35,3 +37,26 @@ Remote-User: rgautam
 }
 </verbatim>
 
+---+++ Rest Call
+<verbatim>
+GET http://localhost:15000/api/entities/list/feed?fields=status
+Remote-User: rgautam
+</verbatim>
+---+++ Result
+<verbatim>
+{
+    "entity": [
+        {
+            "name"  : "SampleOutput",
+            "type"  : "feed",
+            "status": "RUNNING"
+        },
+        {
+            "name": "SampleInput",
+            "type": "feed",
+            "status": "RUNNING"
+        }
+    ]
+}
+</verbatim>
+

Modified: incubator/falcon/trunk/general/src/site/twiki/docs/restapi/EntityUpdate.twiki
URL: http://svn.apache.org/viewvc/incubator/falcon/trunk/general/src/site/twiki/docs/restapi/EntityUpdate.twiki?rev=1565098&r1=1565097&r2=1565098&view=diff
==============================================================================
--- incubator/falcon/trunk/general/src/site/twiki/docs/restapi/EntityUpdate.twiki (original)
+++ incubator/falcon/trunk/general/src/site/twiki/docs/restapi/EntityUpdate.twiki Thu Feb  6 07:37:58 2014
@@ -10,6 +10,7 @@ Updates the submitted entity.
 ---++ Parameters
    * :entity-type can be feed or process.
    * :entity-name is name of the feed or process.
+   * :effective is the optional effective time
 
 ---++ Results
 Result of the validation.
@@ -17,7 +18,7 @@ Result of the validation.
 ---++ Examples
 ---+++ Rest Call
 <verbatim>
-POST http://localhost:15000/api/entities/update/process/SampleProcess
+POST http://localhost:15000/api/entities/update/process/SampleProcess?effective=2014-01-01T00:00Z
 Remote-User: rgautam
 <?xml version="1.0" encoding="UTF-8"?>
 <!-- Daily sample process. Runs at 6th hour every day. Input - last day's hourly data. Generates output for yesterday -->
@@ -58,7 +59,7 @@ Remote-User: rgautam
 <verbatim>
 {
     "requestId": "update\/default\/d6aaa328-6836-4818-a212-515bb43d8b86\n\n",
-    "message": "update\/default\/SampleProcess updated successfully\n\n",
+    "message": "update\/default\/SampleProcess updated successfully with effective time [(local/2014-01-01T00:00Z)]\n\n",
     "status": "SUCCEEDED"
 }
 </verbatim>

Modified: incubator/falcon/trunk/general/src/site/twiki/docs/restapi/InstanceLogs.twiki
URL: http://svn.apache.org/viewvc/incubator/falcon/trunk/general/src/site/twiki/docs/restapi/InstanceLogs.twiki?rev=1565098&r1=1565097&r2=1565098&view=diff
==============================================================================
--- incubator/falcon/trunk/general/src/site/twiki/docs/restapi/InstanceLogs.twiki (original)
+++ incubator/falcon/trunk/general/src/site/twiki/docs/restapi/InstanceLogs.twiki Thu Feb  6 07:37:58 2014
@@ -10,7 +10,8 @@ Get log of a specific instance of an ent
 ---++ Parameters
    * :entity-type can either be a feed or a process.
    * :entity-name is name of the entity.
-   * start is the start time of the instace that you want to refer to
+   * start is the start time of the instance that you want to refer to
+   * end <optional param> is the end time of the instance that you want to refer to
 
 ---++ Results
 Log of specified instance.

Modified: incubator/falcon/trunk/general/src/site/twiki/docs/restapi/InstanceStatus.twiki
URL: http://svn.apache.org/viewvc/incubator/falcon/trunk/general/src/site/twiki/docs/restapi/InstanceStatus.twiki?rev=1565098&r1=1565097&r2=1565098&view=diff
==============================================================================
--- incubator/falcon/trunk/general/src/site/twiki/docs/restapi/InstanceStatus.twiki (original)
+++ incubator/falcon/trunk/general/src/site/twiki/docs/restapi/InstanceStatus.twiki Thu Feb  6 07:37:58 2014
@@ -11,6 +11,7 @@ Get status of a specific instance of an 
    * :entity-type can either be a feed or a process.
    * :entity-name is name of the entity.
    * start is the start time of the instance that you want to refer to
+   * end <optional param> is the end time of the instance that you want to refer to
 
 ---++ Results
 Status of the specified instance.

Added: incubator/falcon/trunk/general/src/site/twiki/docs/restapi/InstanceSummary.twiki
URL: http://svn.apache.org/viewvc/incubator/falcon/trunk/general/src/site/twiki/docs/restapi/InstanceSummary.twiki?rev=1565098&view=auto
==============================================================================
--- incubator/falcon/trunk/general/src/site/twiki/docs/restapi/InstanceSummary.twiki (added)
+++ incubator/falcon/trunk/general/src/site/twiki/docs/restapi/InstanceSummary.twiki Thu Feb  6 07:37:58 2014
@@ -0,0 +1,45 @@
+---++  GET /api/instance/summary/:entity-type/:entity-name
+   * <a href="#Description">Description</a>
+   * <a href="#Parameters">Parameters</a>
+   * <a href="#Results">Results</a>
+   * <a href="#Examples">Examples</a>
+
+---++ Description
+Get summary of instance/instances of an entity.
+
+---++ Parameters
+   * :entity-type can either be a feed or a process.
+   * :entity-name is name of the entity.
+   * start is the start time of the instance that you want to refer to
+   * end <optional param> is the end time of the instance that you want to refer to
+
+---++ Results
+Summary of the instances over the specified time range
+
+---++ Examples
+---+++ Rest Call
+<verbatim>
+GET http://localhost:15000/api/instance/summary/process/WordCount?colo=*&start=2014-01-21T13:00Z&end=2014-01-21T16:00Z
+Remote-User: suhas
+</verbatim>
+---+++ Result
+<verbatim>
+{
+    "status":"SUCCEEDED",
+    "message":"default/SUMMARY\n",
+    "requestId":"default/c344567b-da73-44d5-bcd4-bf456524934c\n",
+    "instancesSummary":
+        {
+            "cluster":"local",
+            "map":
+                {
+                    "entry":
+                        {
+                            "key":"SUCCEEDED",
+                            "value":"3"
+                        }
+                }
+        }
+}
+</verbatim>

Modified: incubator/falcon/trunk/general/src/site/twiki/docs/restapi/ResourceList.twiki
URL: http://svn.apache.org/viewvc/incubator/falcon/trunk/general/src/site/twiki/docs/restapi/ResourceList.twiki?rev=1565098&r1=1565097&r2=1565098&view=diff
==============================================================================
--- incubator/falcon/trunk/general/src/site/twiki/docs/restapi/ResourceList.twiki (original)
+++ incubator/falcon/trunk/general/src/site/twiki/docs/restapi/ResourceList.twiki Thu Feb  6 07:37:58 2014
@@ -23,17 +23,17 @@
 | DELETE      | [[EntityDelete][api/entities/delete/:entity-type/:entity-name]]             | Delete the entity                  |
 | GET         | [[EntityStatus][api/entities/status/:entity-type/:entity-name]]             | Get the status of the entity       |
 | GET         | [[EntityDefinition][api/entities/definition/:entity-type/:entity-name]]     | Get the definition of the entity   |
-| GET         | [[EntityList][api/entities/list/:entity-type]]                              | Get the list of entities           |
+| GET         | [[EntityList][api/entities/list/:entity-type?fields=:fields]]               | Get the list of entities           |
 | GET         | [[EntityDependencies][api/entities/dependencies/:entity-type/:entity-name]] | Get the dependencies of the entity |
 
 ---++ REST Call on Feed and Process Instances
 
 | *Call Type* | *Resource*                                                           | *Description*                |
 | GET         | [[InstanceRunning][api/instance/running/:entity-type/:entity-name]]  | List of running instances.   |
-| GET         | [[InstanceStatus][api/instance/status/:entity-type/:entity-name]]   | Status of a given instance   |
-| POST        | [[InstanceKill][api/instance/kill/:entity-type/:entity-name]]       | Kill a given instance        |
-| POST        | [[InstanceSuspend][api/instance/suspend/:entity-type/:entity-name]] | Suspend a running instance   |
-| POST        | [[InstanceResume][api/instance/resume/:entity-type/:entity-name]]   | Resume a given instance      |
-| POST        | [[InstanceRerun][api/instance/rerun/:entity-type/:entity-name]]     | Rerun a given instance       |
-| GET         | [[InstanceLogs][api/instance/logs/:entity-type/:entity-name]]       | Get logs of a given instance |
+| GET         | [[InstanceStatus][api/instance/status/:entity-type/:entity-name]]    | Status of a given instance   |
+| POST        | [[InstanceKill][api/instance/kill/:entity-type/:entity-name]]        | Kill a given instance        |
+| POST        | [[InstanceSuspend][api/instance/suspend/:entity-type/:entity-name]]  | Suspend a running instance   |
+| POST        | [[InstanceResume][api/instance/resume/:entity-type/:entity-name]]    | Resume a given instance      |
+| POST        | [[InstanceRerun][api/instance/rerun/:entity-type/:entity-name]]      | Rerun a given instance       |
+| GET         | [[InstanceLogs][api/instance/logs/:entity-type/:entity-name]]        | Get logs of a given instance |
 

Modified: incubator/falcon/trunk/pom.xml
URL: http://svn.apache.org/viewvc/incubator/falcon/trunk/pom.xml?rev=1565098&r1=1565097&r2=1565098&view=diff
==============================================================================
--- incubator/falcon/trunk/pom.xml (original)
+++ incubator/falcon/trunk/pom.xml Thu Feb  6 07:37:58 2014
@@ -21,7 +21,7 @@
     <modelVersion>4.0.0</modelVersion>
     <groupId>org.apache.falcon</groupId>
     <artifactId>falcon-website</artifactId>
-    <version>0.4-SNAPSHOT</version>
+    <version>0.5-SNAPSHOT</version>
     <packaging>pom</packaging>
 
     <name>Apache Falcon</name>