You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@falcon.apache.org by aj...@apache.org on 2015/08/04 14:01:51 UTC

[1/3] falcon git commit: FALCON-1297 Falcon Unit which supports Submit and Schedule of jobs. Contributed by Pavan Kumar Kolamuri.

Repository: falcon
Updated Branches:
  refs/heads/master 5a3e1d66f -> 77910aefd


http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/main/resources/deploy.properties
----------------------------------------------------------------------
diff --git a/unit/src/main/resources/deploy.properties b/unit/src/main/resources/deploy.properties
new file mode 100644
index 0000000..7ad5007
--- /dev/null
+++ b/unit/src/main/resources/deploy.properties
@@ -0,0 +1,21 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Application deployment properties, particularly relating to whether the server is in embedded mode or distributed mode.
+*.domain=all
+*.deploy.mode=embedded
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/main/resources/localoozie-log4j.properties
----------------------------------------------------------------------
diff --git a/unit/src/main/resources/localoozie-log4j.properties b/unit/src/main/resources/localoozie-log4j.properties
new file mode 100644
index 0000000..84b2d7a
--- /dev/null
+++ b/unit/src/main/resources/localoozie-log4j.properties
@@ -0,0 +1,34 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+log4j.appender.oozie=org.apache.log4j.ConsoleAppender
+log4j.appender.oozie.Target=System.out
+log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozie.layout.ConversionPattern=%d{ABSOLUTE} %5p %c{1}:%L - %m%n
+
+log4j.appender.null=org.apache.log4j.varia.NullAppender
+
+log4j.logger.org.apache=INFO, oozie
+log4j.logger.org.mortbay=WARN, oozie
+log4j.logger.org.hsqldb=WARN, oozie
+
+log4j.logger.opslog=NONE, null
+log4j.logger.applog=NONE, null
+log4j.logger.instrument=NONE, null
+
+log4j.logger.a=ALL, null
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/main/resources/log4j.xml
----------------------------------------------------------------------
diff --git a/unit/src/main/resources/log4j.xml b/unit/src/main/resources/log4j.xml
new file mode 100644
index 0000000..a161eb0
--- /dev/null
+++ b/unit/src/main/resources/log4j.xml
@@ -0,0 +1,91 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
+
+<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
+    <appender name="console" class="org.apache.log4j.ConsoleAppender">
+        <param name="Target" value="System.out"/>
+        <layout class="org.apache.log4j.PatternLayout">
+            <param name="ConversionPattern" value="%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n"/>
+        </layout>
+    </appender>
+
+    <appender name="FILE" class="org.apache.log4j.DailyRollingFileAppender">
+        <param name="File" value="${user.dir}/target/logs/application.log"/>
+        <param name="Append" value="true"/>
+        <param name="Threshold" value="debug"/>
+        <layout class="org.apache.log4j.PatternLayout">
+            <param name="ConversionPattern" value="%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n"/>
+        </layout>
+    </appender>
+
+    <appender name="AUDIT" class="org.apache.log4j.DailyRollingFileAppender">
+        <param name="File" value="${user.dir}/target/logs/audit.log"/>
+        <param name="Append" value="true"/>
+        <param name="Threshold" value="debug"/>
+        <layout class="org.apache.log4j.PatternLayout">
+            <param name="ConversionPattern" value="%d %x %m%n"/>
+        </layout>
+    </appender>
+
+    <appender name="METRIC" class="org.apache.log4j.DailyRollingFileAppender">
+        <param name="File" value="${user.dir}/target/logs/metric.log"/>
+        <param name="Append" value="true"/>
+        <param name="Threshold" value="debug"/>
+        <layout class="org.apache.log4j.PatternLayout">
+            <param name="ConversionPattern" value="%d %m%n"/>
+        </layout>
+    </appender>
+
+    <appender name="ALERT" class="org.apache.log4j.DailyRollingFileAppender">
+        <param name="File" value="${falcon.log.dir}/${falcon.app.type}.alerts.log"/>
+        <param name="Append" value="true"/>
+        <param name="Threshold" value="debug"/>
+        <layout class="org.apache.log4j.PatternLayout">
+            <param name="ConversionPattern" value="%d %m%n"/>
+        </layout>
+    </appender>
+
+    <logger name="org.apache.falcon" additivity="false">
+        <level value="info"/>
+        <appender-ref ref="console"/>
+    </logger>
+
+    <logger name="org.apache.oozie" additivity="false">
+        <level value="info"/>
+        <appender-ref ref="console"/>
+    </logger>
+
+    <logger name="AUDIT">
+        <level value="info"/>
+        <appender-ref ref="AUDIT"/>
+    </logger>
+
+    <logger name="METRIC">
+        <level value="info"/>
+        <appender-ref ref="METRIC"/>
+    </logger>
+
+    <root>
+        <priority value="info"/>
+        <appender-ref ref="console"/>
+    </root>
+
+</log4j:configuration>

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/main/resources/mapred-site.xml
----------------------------------------------------------------------
diff --git a/unit/src/main/resources/mapred-site.xml b/unit/src/main/resources/mapred-site.xml
new file mode 100644
index 0000000..f60981e
--- /dev/null
+++ b/unit/src/main/resources/mapred-site.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+    <property>
+        <name>mapreduce.framework.name</name>
+        <value>unit</value>
+    </property>
+
+    <property>
+        <name>mapreduce.jobtracker.system.dir</name>
+        <value>/tmp</value>
+    </property>
+
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/main/resources/oozie-site.xml
----------------------------------------------------------------------
diff --git a/unit/src/main/resources/oozie-site.xml b/unit/src/main/resources/oozie-site.xml
new file mode 100644
index 0000000..23d41eb
--- /dev/null
+++ b/unit/src/main/resources/oozie-site.xml
@@ -0,0 +1,170 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>oozie.service.HadoopAccessorService.supported.filesystems</name>
+        <value>hdfs,hftp,webhdfs,jail</value>
+    </property>
+    <property>
+        <name>oozie.service.JPAService.create.db.schema</name>
+        <value>true</value>
+    </property>
+    <property>
+        <name>oozie.service.ELService.ext.functions.coord-job-submit-instances</name>
+        <value>
+            now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
+            today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,
+            yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,
+            currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_currentWeek_echo,
+            lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_lastWeek_echo,
+            currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,
+            lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,
+            currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,
+            lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,
+            formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
+            latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
+            future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo
+        </value>
+        <description>
+            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
+            This property is a convenience property to add extensions to the built in executors without having to
+            include all the built in ones.
+        </description>
+    </property>
+    <property>
+        <name>oozie.service.ELService.ext.functions.coord-action-create-inst</name>
+        <value>
+            now=org.apache.oozie.extensions.OozieELExtensions#ph2_now_inst,
+            today=org.apache.oozie.extensions.OozieELExtensions#ph2_today_inst,
+            yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday_inst,
+            currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_currentWeek_inst,
+            lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_lastWeek_inst,
+            currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth_inst,
+            lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth_inst,
+            currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear_inst,
+            lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear_inst,
+            latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
+            future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
+            formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
+            user=org.apache.oozie.coord.CoordELFunctions#coord_user
+        </value>
+        <description>
+            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
+            This property is a convenience property to add extensions to the built in executors without having to
+            include all the built in ones.
+        </description>
+    </property>
+    <property>
+        <name>oozie.service.ELService.ext.functions.coord-action-create</name>
+        <value>
+            now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,
+            today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,
+            yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,
+            currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_currentWeek,
+            lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_lastWeek,
+            currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,
+            lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,
+            currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,
+            lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,
+            latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
+            future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
+            formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
+            user=org.apache.oozie.coord.CoordELFunctions#coord_user
+        </value>
+        <description>
+            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
+            This property is a convenience property to add extensions to the built in executors without having to
+            include all the built in ones.
+        </description>
+    </property>
+    <property>
+        <name>oozie.service.ELService.ext.functions.coord-job-submit-data</name>
+        <value>
+            now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
+            today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,
+            yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,
+            currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_currentWeek_echo,
+            lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph1_lastWeek_echo,
+            currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,
+            lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,
+            currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,
+            lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,
+            dataIn=org.apache.oozie.extensions.OozieELExtensions#ph1_dataIn_echo,
+            instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_wrap,
+            formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
+            dateOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dateOffset_echo,
+            user=org.apache.oozie.coord.CoordELFunctions#coord_user
+        </value>
+        <description>
+            EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
+            This property is a convenience property to add extensions to the built in executors without having to
+            include all the built in ones.
+        </description>
+    </property>
+    <property>
+        <name>oozie.service.ELService.ext.functions.coord-action-start</name>
+        <value>
+            now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,
+            today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,
+            yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,
+            currentWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_currentWeek,
+            lastWeek=org.apache.oozie.extensions.OozieELExtensions#ph2_lastWeek,
+            currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,
+            lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,
+            currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,
+            lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,
+            latest=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latest,
+            future=org.apache.oozie.coord.CoordELFunctions#ph3_coord_future,
+            dataIn=org.apache.oozie.extensions.OozieELExtensions#ph3_dataIn,
+            instanceTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_nominalTime,
+            dateOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dateOffset,
+            formatTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_formatTime,
+            user=org.apache.oozie.coord.CoordELFunctions#coord_user
+        </value>
+        <description>
+            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
+            This property is a convenience property to add extensions to the built in executors without having to
+            include all the built in ones.
+        </description>
+    </property>
+    <property>
+        <name>oozie.service.ELService.ext.functions.coord-sla-submit</name>
+        <value>
+            instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_fixed,
+            user=org.apache.oozie.coord.CoordELFunctions#coord_user
+        </value>
+        <description>
+            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
+        </description>
+    </property>
+    <property>
+        <name>oozie.service.ELService.ext.functions.coord-sla-create</name>
+        <value>
+            instanceTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_nominalTime,
+            user=org.apache.oozie.coord.CoordELFunctions#coord_user
+        </value>
+        <description>
+            EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
+        </description>
+    </property>
+    <property>
+        <name>oozie.service.coord.check.maximum.frequency</name>
+        <value>false</value>
+    </property>
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/main/resources/startup.properties
----------------------------------------------------------------------
diff --git a/unit/src/main/resources/startup.properties b/unit/src/main/resources/startup.properties
new file mode 100644
index 0000000..4207ab9
--- /dev/null
+++ b/unit/src/main/resources/startup.properties
@@ -0,0 +1,129 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+*.domain=debug
+
+######### Implementation classes #########
+## DONT MODIFY UNLESS SURE ABOUT CHANGE ##
+
+*.workflow.engine.impl=org.apache.falcon.workflow.engine.OozieWorkflowEngine
+*.oozie.process.workflow.builder=org.apache.falcon.workflow.OozieProcessWorkflowBuilder
+*.oozie.feed.workflow.builder=org.apache.falcon.workflow.OozieFeedWorkflowBuilder
+*.SchedulableEntityManager.impl=org.apache.falcon.resource.SchedulableEntityManager
+*.ConfigSyncService.impl=org.apache.falcon.resource.ConfigSyncService
+*.ProcessInstanceManager.impl=org.apache.falcon.resource.InstanceManager
+*.catalog.service.impl=org.apache.falcon.catalog.HiveCatalogService
+
+##### Falcon Services #####
+*.application.services=org.apache.falcon.security.AuthenticationInitializationService,\
+                        org.apache.falcon.workflow.WorkflowJobEndNotificationService, \
+                        org.apache.falcon.service.ProcessSubscriberService,\
+                        org.apache.falcon.entity.store.ConfigurationStore,\
+                        org.apache.falcon.rerun.service.RetryService,\
+                        org.apache.falcon.rerun.service.LateRunService,\
+
+##### Falcon Configuration Store Change listeners #####
+*.configstore.listeners=org.apache.falcon.entity.v0.EntityGraph,\
+                        org.apache.falcon.entity.ColoClusterRelation,\
+                        org.apache.falcon.group.FeedGroupMap,\
+                        org.apache.falcon.entity.store.FeedLocationStore
+
+##### JMS MQ Broker Implementation class #####
+*.broker.impl.class=org.apache.activemq.ActiveMQConnectionFactory
+
+
+######### System startup parameters #########
+
+# Location to store user entity configurations
+debug.config.store.uri=file://${user.dir}/target/store
+debug.config.store.persist=false
+debug.config.oozie.conf.uri=${user.dir}/target/oozie
+debug.system.lib.location=${system.lib.location}
+debug.broker.url=vm://localhost
+debug.retry.recorder.path=${user.dir}/target/retry
+debug.libext.feed.retention.paths=${falcon.libext}
+debug.libext.feed.replication.paths=${falcon.libext}
+debug.libext.process.paths=${falcon.libext}
+
+*.falcon.cleanup.service.frequency=minutes(5)
+
+
+######### Properties for configuring JMS provider - activemq #########
+# Default Active MQ url
+*.broker.url=tcp://localhost:61616
+
+# default time-to-live for a JMS message 3 days (time in minutes)
+*.broker.ttlInMins=4320
+*.entity.topic=FALCON.ENTITY.TOPIC
+*.max.retry.failure.count=1
+*.retry.recorder.path=${user.dir}/logs/retry
+
+######### Properties for configuring iMon client and metric #########
+*.internal.queue.size=1000
+
+
+##### List of shared libraries for Falcon workflows #####
+*.shared.libs=activemq-core,ant,geronimo-j2ee-management,jms,json-simple,oozie-client,spring-jms,commons-lang3
+
+######### Authentication Properties #########
+
+# Authentication type must be specified: simple|kerberos
+*.falcon.authentication.type=simple
+
+##### Service Configuration
+
+# Indicates the Kerberos principal to be used in Falcon Service.
+*.falcon.service.authentication.kerberos.principal=
+
+# Location of the keytab file with the credentials for the Service principal.
+*.falcon.service.authentication.kerberos.keytab=
+
+# name node principal to talk to config store
+*.dfs.namenode.kerberos.principal=
+
+##### SPNEGO Configuration
+
+# Authentication type must be specified: simple|kerberos|<class>
+# org.apache.falcon.security.RemoteUserInHeaderBasedAuthenticationHandler can be used for backwards compatibility
+*.falcon.http.authentication.type=simple
+
+# Indicates how long (in seconds) an authentication token is valid before it has to be renewed.
+*.falcon.http.authentication.token.validity=36000
+
+# The signature secret for signing the authentication tokens.
+*.falcon.http.authentication.signature.secret=falcon
+
+# The domain to use for the HTTP cookie that stores the authentication token.
+*.falcon.http.authentication.cookie.domain=
+
+# Indicates if anonymous requests are allowed when using 'simple' authentication.
+*.falcon.http.authentication.simple.anonymous.allowed=false
+
+# Indicates the Kerberos principal to be used for HTTP endpoint.
+# The principal MUST start with 'HTTP/' as per Kerberos HTTP SPNEGO specification.
+*.falcon.http.authentication.kerberos.principal=
+
+# Location of the keytab file with the credentials for the HTTP principal.
+*.falcon.http.authentication.kerberos.keytab=
+
+# The kerberos names rules is to resolve kerberos principal names, refer to Hadoop's KerberosName for more details.
+*.falcon.http.authentication.kerberos.name.rules=DEFAULT
+
+# Comma separated list of black listed users
+*.falcon.http.authentication.blacklisted.users=
+

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/test/java/org/apache/falcon/unit/FalconUnitTestBase.java
----------------------------------------------------------------------
diff --git a/unit/src/test/java/org/apache/falcon/unit/FalconUnitTestBase.java b/unit/src/test/java/org/apache/falcon/unit/FalconUnitTestBase.java
new file mode 100644
index 0000000..bd03efb
--- /dev/null
+++ b/unit/src/test/java/org/apache/falcon/unit/FalconUnitTestBase.java
@@ -0,0 +1,317 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.falcon.unit;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.falcon.FalconException;
+import org.apache.falcon.client.FalconCLIException;
+import org.apache.falcon.entity.FeedHelper;
+import org.apache.falcon.entity.Storage;
+import org.apache.falcon.entity.store.ConfigurationStore;
+import org.apache.falcon.entity.v0.Entity;
+import org.apache.falcon.entity.v0.EntityType;
+import org.apache.falcon.entity.v0.feed.Feed;
+import org.apache.falcon.entity.v0.feed.LocationType;
+import org.apache.falcon.entity.v0.process.Process;
+import org.apache.falcon.expression.ExpressionHelper;
+import org.apache.falcon.hadoop.HadoopClientFactory;
+import org.apache.falcon.hadoop.JailedFileSystem;
+import org.apache.falcon.resource.APIResult;
+import org.apache.falcon.resource.InstancesResult;
+import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.testng.Assert;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.AfterTest;
+import org.testng.annotations.BeforeClass;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.InputStream;
+import java.io.BufferedReader;
+import java.io.FileOutputStream;
+import java.io.FileReader;
+import java.text.ParseException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.TimeZone;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * Test Utility for Local Falcon Unit.
+ */
+public class FalconUnitTestBase {
+
+    /**
+     * Perform a predicate evaluation.
+     *
+     * @return the boolean result of the evaluation.
+     * @throws Exception thrown if the predicate evaluation could not evaluate.
+     */
+    public interface Predicate {
+
+        boolean evaluate() throws Exception;
+    }
+
+    private static final Logger LOG = LoggerFactory.getLogger(FalconUnitTestBase.class);
+
+    private static final String DEFAULT_CLUSTER = "local";
+    private static final String DEFAULT_COLO = "local";
+    private static final String CLUSTER = "cluster";
+    private static final String COLO = "colo";
+    private static final String CLUSTER_TEMPLATE = "/cluster-template.xml";
+    private static final String STAGING_PATH = "/projects/falcon/staging";
+    private static final String WORKING_PATH = "/projects/falcon/working";
+
+    public static final Pattern VAR_PATTERN = Pattern.compile("##[A-Za-z0-9_.]*##");
+    protected static FalconUnitClient falconUnitClient;
+    protected static JailedFileSystem fs;
+    protected static ConfigurationStore configStore;
+
+    @BeforeClass
+    public void setup() throws FalconException, IOException {
+        FalconUnit.start(true);
+        falconUnitClient = FalconUnit.getClient();
+        fs = (JailedFileSystem) FalconUnit.getFileSystem();
+        configStore = falconUnitClient.getConfigStore();
+    }
+
+    @AfterClass
+    public void cleanup() throws Exception {
+        fs.delete(new Path(STAGING_PATH), true);
+        fs.delete(new Path(WORKING_PATH), true);
+        FalconUnit.cleanup();
+    }
+
+    @AfterTest
+    public void cleanUpActionXml() throws IOException {
+        //Needed since oozie writes action xml to current directory.
+        FileUtils.deleteQuietly(new File("action.xml"));
+        FileUtils.deleteQuietly(new File(".action.xml.crc"));
+    }
+
+    protected FalconUnitClient getClient() throws FalconException {
+        return FalconUnit.getClient();
+    }
+
+    protected JailedFileSystem getFileSystem() throws IOException {
+        return fs;
+    }
+
+    public boolean submitCluster(String colo, String cluster,
+                                 Map<String, String> props) throws IOException, FalconCLIException {
+        props = updateColoAndCluster(colo, cluster, props);
+        fs.mkdirs(new Path(STAGING_PATH), HadoopClientFactory.ALL_PERMISSION);
+        fs.mkdirs(new Path(WORKING_PATH), HadoopClientFactory.READ_EXECUTE_PERMISSION);
+        String clusterXmlPath = overlayParametersOverTemplate(CLUSTER_TEMPLATE, props);
+        APIResult result = falconUnitClient.submit(CLUSTER, clusterXmlPath);
+        return true ? APIResult.Status.SUCCEEDED.equals(result.getStatus()) : false;
+    }
+
+    public boolean submitCluster() throws IOException, FalconCLIException {
+        return submitCluster(DEFAULT_COLO, DEFAULT_CLUSTER, null);
+    }
+
+    public APIResult submit(EntityType entityType, String filePath) throws FalconCLIException, IOException {
+        return submit(entityType.toString(), filePath);
+    }
+
+    public APIResult submit(String entityType, String filePath) throws FalconCLIException, IOException {
+        return falconUnitClient.submit(entityType, filePath);
+    }
+
+    public APIResult submitProcess(String filePath, String appDirectory) throws IOException, FalconCLIException {
+        createDir(appDirectory);
+        return submit(EntityType.PROCESS, filePath);
+    }
+
+    public APIResult scheduleProcess(String processName, String startTime, int numInstances,
+                                   String cluster, String localWfPath) throws FalconException,
+            IOException, FalconCLIException {
+        Process processEntity = configStore.get(EntityType.PROCESS, processName);
+        if (processEntity == null) {
+            throw new FalconException("Process not found " + processName);
+        }
+        String workflowPath = processEntity.getWorkflow().getPath();
+        fs.copyFromLocalFile(new Path(localWfPath), new Path(workflowPath));
+        return falconUnitClient.schedule(EntityType.PROCESS, processName, startTime, numInstances, cluster);
+    }
+
+    public APIResult scheduleProcess(String processName, String startTime, int numInstances,
+                                   String cluster) throws FalconException, FalconCLIException {
+        Process processEntity = configStore.get(EntityType.PROCESS, processName);
+        if (processEntity == null) {
+            throw new FalconException("Process not found " + processName);
+        }
+        return falconUnitClient.schedule(EntityType.PROCESS, processName, startTime, numInstances, cluster);
+    }
+
+    private Map<String, String> updateColoAndCluster(String colo, String cluster, Map<String, String> props) {
+        if (props == null) {
+            props = new HashMap<>();
+        }
+        String coloProp = StringUtils.isEmpty(colo) ? DEFAULT_COLO : colo;
+        props.put(COLO, coloProp);
+
+        String clusterProp = StringUtils.isEmpty(cluster) ? DEFAULT_CLUSTER : cluster;
+        props.put(CLUSTER, clusterProp);
+
+        return props;
+    }
+
+    public static String overlayParametersOverTemplate(String template,
+                                                       Map<String, String> overlay) throws IOException {
+        File tmpFile = getTempFile();
+        OutputStream out = new FileOutputStream(tmpFile);
+
+        InputStreamReader in;
+        InputStream resourceAsStream = FalconUnitTestBase.class.getResourceAsStream(template);
+        if (resourceAsStream == null) {
+            in = new FileReader(template);
+        } else {
+            in = new InputStreamReader(resourceAsStream);
+        }
+        BufferedReader reader = new BufferedReader(in);
+        String line;
+        while ((line = reader.readLine()) != null) {
+            Matcher matcher = VAR_PATTERN.matcher(line);
+            while (matcher.find()) {
+                String variable = line.substring(matcher.start(), matcher.end());
+                line = line.replace(variable, overlay.get(variable.substring(2, variable.length() - 2)));
+                matcher = VAR_PATTERN.matcher(line);
+            }
+            out.write(line.getBytes());
+            out.write("\n".getBytes());
+        }
+        reader.close();
+        out.close();
+        return tmpFile.getAbsolutePath();
+    }
+
+
+    public static File getTempFile() throws IOException {
+        return getTempFile("test", ".xml");
+    }
+
+    public static File getTempFile(String prefix, String suffix) throws IOException {
+        return getTempFile("target", prefix, suffix);
+    }
+
+    @SuppressWarnings("ResultOfMethodCallIgnored")
+    public static File getTempFile(String path, String prefix, String suffix) throws IOException {
+        File f = new File(path);
+        if (!f.exists()) {
+            f.mkdirs();
+        }
+        return File.createTempFile(prefix, suffix, f);
+    }
+
+    /**
+     * Creates data in the feed path with the given timestamp.
+     *
+     * @param feedName
+     * @param cluster
+     * @param time
+     * @param inputFile
+     * @throws FalconException
+     * @throws ParseException
+     * @throws IOException
+     */
+    public void createData(String feedName, String cluster, String time,
+                           String inputFile) throws FalconException, ParseException, IOException {
+        String feedPath = getFeedPathForTS(cluster, feedName, time);
+        fs.mkdirs(new Path(feedPath));
+        fs.copyFromLocalFile(new Path(getAbsolutePath("/" + inputFile)), new Path(feedPath));
+    }
+
+    protected String getFeedPathForTS(String cluster, String feedName,
+                                      String timeStamp) throws FalconException, ParseException {
+        Entity existingEntity = configStore.get(EntityType.FEED, feedName);
+        if (existingEntity == null) {
+            throw new FalconException("Feed Not Found  " + feedName);
+        }
+        Feed feed = (Feed) existingEntity;
+        Storage rawStorage = FeedHelper.createStorage(cluster, feed);
+        String feedPathTemplate = rawStorage.getUriTemplate(LocationType.DATA);
+        Properties properties = ExpressionHelper.getTimeVariables(ExpressionHelper.FORMATTER.get().parse(timeStamp),
+                TimeZone.getTimeZone("UTC"));
+        String feedPath = ExpressionHelper.substitute(feedPathTemplate, properties);
+        return feedPath;
+    }
+
+
+    public String getAbsolutePath(String fileName) {
+        return this.getClass().getResource(fileName).getPath();
+    }
+
+    public void createDir(String path) throws IOException {
+        fs.mkdirs(new Path(path));
+    }
+
+    /**
+     * Wait for a condition, expressed via a {@link Predicate} to become true.
+     *
+     * @param timeout   maximum time in milliseconds to wait for the predicate to become true.
+     * @param predicate predicate waiting on.
+     * @return the waited time.
+     */
+    protected long waitFor(int timeout, Predicate predicate) {
+        long started = System.currentTimeMillis();
+        long mustEnd = System.currentTimeMillis() + timeout;
+        long lastEcho = 0;
+        try {
+            long waiting = mustEnd - System.currentTimeMillis();
+            LOG.info("Waiting up to [{}] msec", waiting);
+            while (!(predicate.evaluate()) && System.currentTimeMillis() < mustEnd) {
+                if ((System.currentTimeMillis() - lastEcho) > 5000) {
+                    waiting = mustEnd - System.currentTimeMillis();
+                    LOG.info("Waiting up to [{}] msec", waiting);
+                    lastEcho = System.currentTimeMillis();
+                }
+                Thread.sleep(5000);
+            }
+            if (!predicate.evaluate()) {
+                LOG.info("Waiting timed out after [{}] msec", timeout);
+            }
+            return System.currentTimeMillis() - started;
+        } catch (Exception ex) {
+            throw new RuntimeException(ex);
+        }
+    }
+
+    protected long waitForStatus(final EntityType entityType, final String entityName, final String instanceTime) {
+        return waitFor(20000, new Predicate() {
+            public boolean evaluate() throws Exception {
+                InstancesResult.WorkflowStatus status = falconUnitClient.getInstanceStatus(entityType,
+                        entityName, instanceTime);
+                return InstancesResult.WorkflowStatus.SUCCEEDED.equals(status);
+            }
+        });
+    }
+
+    public void assertStatus(APIResult apiResult) {
+        Assert.assertEquals(APIResult.Status.SUCCEEDED, apiResult.getStatus());
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/test/java/org/apache/falcon/unit/TestFalconUnit.java
----------------------------------------------------------------------
diff --git a/unit/src/test/java/org/apache/falcon/unit/TestFalconUnit.java b/unit/src/test/java/org/apache/falcon/unit/TestFalconUnit.java
new file mode 100644
index 0000000..855be79
--- /dev/null
+++ b/unit/src/test/java/org/apache/falcon/unit/TestFalconUnit.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.falcon.unit;
+
+import org.apache.falcon.entity.v0.EntityType;
+import org.apache.falcon.resource.APIResult;
+import org.apache.falcon.resource.InstancesResult;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+/**
+ * Test cases of falcon jobs using Local Oozie and LocalJobRunner.
+ */
+public class TestFalconUnit extends FalconUnitTestBase {
+
+    @Test
+    public void testProcessInstanceExecution() throws Exception {
+        // submit with default props
+        submitCluster();
+        // submitting feeds
+        APIResult result = submit(EntityType.FEED, getAbsolutePath("/infeed.xml"));
+        assertStatus(result);
+        result = submit(EntityType.FEED, getAbsolutePath("/outfeed.xml"));
+        assertStatus(result);
+        // submitting and scheduling process
+        String scheduleTime = "2015-06-20T00:00Z";
+        createData("in", "local", scheduleTime, "input.txt");
+        result = submitProcess(getAbsolutePath("/process.xml"), "/app/oozie-mr");
+        assertStatus(result);
+        result = scheduleProcess("process", scheduleTime, 1, "local", getAbsolutePath("/workflow.xml"));
+        assertStatus(result);
+        waitForStatus(EntityType.PROCESS, "process", scheduleTime);
+        InstancesResult.WorkflowStatus status = falconUnitClient.getInstanceStatus(EntityType.PROCESS,
+                "process", scheduleTime);
+        Assert.assertEquals(InstancesResult.WorkflowStatus.SUCCEEDED, status);
+        String outPath = getFeedPathForTS("local", "out", scheduleTime);
+        Assert.assertTrue(getFileSystem().exists(new Path(outPath)));
+        FileStatus[] files = getFileSystem().listStatus(new Path(outPath));
+        Assert.assertTrue(files.length > 0);
+    }
+}

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/test/java/org/apache/falcon/unit/examples/JavaExample.java
----------------------------------------------------------------------
diff --git a/unit/src/test/java/org/apache/falcon/unit/examples/JavaExample.java b/unit/src/test/java/org/apache/falcon/unit/examples/JavaExample.java
new file mode 100644
index 0000000..95e320b
--- /dev/null
+++ b/unit/src/test/java/org/apache/falcon/unit/examples/JavaExample.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.falcon.unit.examples;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+
+/**
+ * Java Example for file copy.
+ */
+public final class JavaExample {
+
+    private JavaExample() {}
+
+    public static void main(String[] args) throws IOException {
+        System.out.println("Java Main Example");
+
+        if (args.length != 2) {
+            throw new IllegalArgumentException("No of arguments should be two");
+        }
+        String inputPath = args[0];
+        String outPath = args[1];
+        FileSystem fs = FileSystem.get(new Configuration());
+        fs.mkdirs(new Path(outPath));
+        OutputStream out = fs.create(new Path(outPath + "/" + "part"));
+        FileStatus[] files = fs.listStatus(new Path(inputPath));
+        if (files != null) {
+            for (FileStatus file : files) {
+                BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(file.getPath())));
+                String line;
+                while ((line = reader.readLine()) != null) {
+                    if (!line.startsWith("#")) {
+                        out.write(line.getBytes());
+                        out.write("\n".getBytes());
+                        System.out.println(line);
+                    }
+                }
+                reader.close();
+            }
+        }
+        out.close();
+    }
+}

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/test/resources/cluster-template.xml
----------------------------------------------------------------------
diff --git a/unit/src/test/resources/cluster-template.xml b/unit/src/test/resources/cluster-template.xml
new file mode 100644
index 0000000..d0c9b24
--- /dev/null
+++ b/unit/src/test/resources/cluster-template.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+<cluster colo="##colo##" description="" name="##cluster##" xmlns="uri:falcon:cluster:0.1">
+    <interfaces>
+        <interface type="readonly" endpoint="jail://global:00"
+                   version="0.20.2"/>
+        <interface type="write" endpoint="jail://global:00"
+                   version="0.20.2"/>
+        <interface type="execute" endpoint="local" version="0.20.2"/>
+        <interface type="workflow" endpoint="localoozie"
+                   version="3.1"/>
+    </interfaces>
+    <locations>
+        <location name="staging" path="/projects/falcon/staging"/>
+        <location name="temp" path="/tmp"/>
+        <location name="working" path="/projects/falcon/working"/>
+    </locations>
+    <properties>
+    </properties>
+</cluster>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/test/resources/infeed.xml
----------------------------------------------------------------------
diff --git a/unit/src/test/resources/infeed.xml b/unit/src/test/resources/infeed.xml
new file mode 100644
index 0000000..509d868
--- /dev/null
+++ b/unit/src/test/resources/infeed.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+<feed description="input" name="in" xmlns="uri:falcon:feed:0.1">
+    <groups>inputs</groups>
+
+    <frequency>minutes(1)</frequency>
+    <timezone>UTC</timezone>
+    <late-arrival cut-off="hours(1)"/>
+
+    <clusters>
+        <cluster name="local">
+            <validity start="2013-01-01T00:00Z" end="2030-01-01T00:00Z"/>
+            <retention limit="hours(400000)" action="delete"/>
+        </cluster>
+    </clusters>
+
+    <locations>
+        <location type="data" path="/data/in/${YEAR}/${MONTH}/${DAY}/${HOUR}/${MINUTE}"/>
+    </locations>
+
+    <ACL owner="user" group="user" permission="0x644"/>
+    <schema location="/schema/log/log.format.csv" provider="csv"/>
+</feed>

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/test/resources/input.txt
----------------------------------------------------------------------
diff --git a/unit/src/test/resources/input.txt b/unit/src/test/resources/input.txt
new file mode 100644
index 0000000..cb1d8c0
--- /dev/null
+++ b/unit/src/test/resources/input.txt
@@ -0,0 +1,18 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+Hello Falcon Unit !!!!!!
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/test/resources/outfeed.xml
----------------------------------------------------------------------
diff --git a/unit/src/test/resources/outfeed.xml b/unit/src/test/resources/outfeed.xml
new file mode 100644
index 0000000..017afbe
--- /dev/null
+++ b/unit/src/test/resources/outfeed.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+<feed description="output" name="out" xmlns="uri:falcon:feed:0.1">
+    <groups>outputs</groups>
+
+    <frequency>minutes(1)</frequency>
+    <timezone>UTC</timezone>
+    <late-arrival cut-off="hours(1)"/>
+
+    <clusters>
+        <cluster name="local">
+            <validity start="2013-01-01T00:00Z" end="2030-01-01T00:00Z"/>
+            <retention limit="hours(2)" action="delete"/>
+        </cluster>
+    </clusters>
+
+    <locations>
+        <location type="data" path="/data/out/${YEAR}/${MONTH}/${DAY}/${HOUR}/${MINUTE}"/>
+    </locations>
+
+    <ACL owner="user" group="user" permission="0x644"/>
+    <schema location="/schema/out/out.format.csv" provider="csv"/>
+</feed>

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/test/resources/process.xml
----------------------------------------------------------------------
diff --git a/unit/src/test/resources/process.xml b/unit/src/test/resources/process.xml
new file mode 100644
index 0000000..6854311
--- /dev/null
+++ b/unit/src/test/resources/process.xml
@@ -0,0 +1,50 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<process name="process" xmlns="uri:falcon:process:0.1">
+    <clusters>
+        <cluster name="local">
+            <validity start="2013-11-18T00:05Z" end="2013-11-21T01:05Z"/>
+        </cluster>
+    </clusters>
+
+    <parallel>5</parallel>
+    <order>FIFO</order>
+    <frequency>minutes(1)</frequency>
+    <timezone>UTC</timezone>
+
+    <inputs>
+        <!-- In the workflow, the input paths will be available in a variable 'inpaths' -->
+        <input name="inpaths" feed="in" start="now(0,0)" end="now(0,0)" />
+    </inputs>
+
+    <outputs>
+        <!-- In the workflow, the output path will be available in a variable 'outpath' -->
+        <output name="outpath" feed="out" instance="now(0,0)"/>
+    </outputs>
+
+    <properties>
+        <!-- In the workflow, these properties will be available with variable - key -->
+        <property name="queueName" value="default"/>
+        <!-- The schedule time available as a property in workflow -->
+        <property name="time" value="${instanceTime()}"/>
+    </properties>
+
+    <workflow engine="oozie" path="/app/oozie-mr"/>
+</process>

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/test/resources/workflow.xml
----------------------------------------------------------------------
diff --git a/unit/src/test/resources/workflow.xml b/unit/src/test/resources/workflow.xml
new file mode 100644
index 0000000..8b4566c
--- /dev/null
+++ b/unit/src/test/resources/workflow.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<workflow-app xmlns="uri:oozie:workflow:0.2" name="java-main-wf">
+    <start to="java-node"/>
+    <action name="java-node">
+        <java>
+            <job-tracker>local</job-tracker>
+            <name-node>jail://global:00</name-node>
+            <configuration>
+                <property>
+                    <name>mapred.job.queue.name</name>
+                    <value>default</value>
+                </property>
+            </configuration>
+            <main-class>org.apache.falcon.unit.examples.JavaExample</main-class>
+            <arg>${inpaths}</arg>
+            <arg>${outpath}</arg>
+        </java>
+        <ok to="end"/>
+        <error to="fail"/>
+    </action>
+    <kill name="fail">
+        <message>Java failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
+    </kill>
+    <end name="end"/>
+</workflow-app>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/webapp/src/test/java/org/apache/falcon/resource/EntityManagerJerseyIT.java
----------------------------------------------------------------------
diff --git a/webapp/src/test/java/org/apache/falcon/resource/EntityManagerJerseyIT.java b/webapp/src/test/java/org/apache/falcon/resource/EntityManagerJerseyIT.java
index 47b51fe..c602ffb 100644
--- a/webapp/src/test/java/org/apache/falcon/resource/EntityManagerJerseyIT.java
+++ b/webapp/src/test/java/org/apache/falcon/resource/EntityManagerJerseyIT.java
@@ -42,7 +42,7 @@ import org.apache.oozie.client.BundleJob;
 import org.apache.oozie.client.CoordinatorJob;
 import org.apache.oozie.client.Job;
 import org.apache.oozie.client.Job.Status;
-import org.apache.oozie.client.ProxyOozieClient;
+import org.apache.oozie.client.OozieClient;
 import org.testng.Assert;
 import org.testng.annotations.AfterMethod;
 import org.testng.annotations.BeforeClass;
@@ -292,7 +292,7 @@ public class EntityManagerJerseyIT {
         OozieTestUtils.waitForBundleStart(context, Job.Status.RUNNING);
         List<BundleJob> bundles = OozieTestUtils.getBundles(context);
         Assert.assertEquals(bundles.size(), 1);
-        ProxyOozieClient ozClient = OozieTestUtils.getOozieClient(context.getCluster().getCluster());
+        OozieClient ozClient = OozieTestUtils.getOozieClient(context.getCluster().getCluster());
         String bundle = bundles.get(0).getId();
         String coordId = ozClient.getBundleJobInfo(bundle).getCoordinators().get(0).getId();
 
@@ -364,7 +364,7 @@ public class EntityManagerJerseyIT {
         OozieTestUtils.waitForBundleStart(context, Job.Status.RUNNING);
         List<BundleJob> bundles = OozieTestUtils.getBundles(context);
         Assert.assertEquals(bundles.size(), 1);
-        ProxyOozieClient ozClient = OozieTestUtils.getOozieClient(context.getCluster().getCluster());
+        OozieClient ozClient = OozieTestUtils.getOozieClient(context.getCluster().getCluster());
         String bundle = bundles.get(0).getId();
         String coordId = ozClient.getBundleJobInfo(bundle).getCoordinators().get(0).getId();
 

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/webapp/src/test/java/org/apache/falcon/resource/ProcessInstanceManagerIT.java
----------------------------------------------------------------------
diff --git a/webapp/src/test/java/org/apache/falcon/resource/ProcessInstanceManagerIT.java b/webapp/src/test/java/org/apache/falcon/resource/ProcessInstanceManagerIT.java
index 6e58064..ece4fbf 100644
--- a/webapp/src/test/java/org/apache/falcon/resource/ProcessInstanceManagerIT.java
+++ b/webapp/src/test/java/org/apache/falcon/resource/ProcessInstanceManagerIT.java
@@ -29,6 +29,7 @@ import org.apache.falcon.resource.InstancesResult.WorkflowStatus;
 import org.apache.falcon.security.CurrentUser;
 import org.apache.falcon.util.OozieTestUtils;
 import org.apache.falcon.workflow.engine.OozieClientFactory;
+import org.apache.oozie.client.OozieClient;
 import org.apache.oozie.client.ProxyOozieClient;
 import org.apache.oozie.client.WorkflowJob;
 import org.testng.Assert;
@@ -224,7 +225,7 @@ public class ProcessInstanceManagerIT {
     private void waitForWorkflow(String instance, WorkflowJob.Status status) throws Exception {
         TestContext context = new TestContext();
         ExternalId extId = new ExternalId(context.processName, Tag.DEFAULT, EntityUtil.parseDateUTC(instance));
-        ProxyOozieClient ozClient = OozieClientFactory.get(
+        OozieClient ozClient = OozieClientFactory.get(
                 (Cluster) ConfigurationStore.get().get(EntityType.CLUSTER, context.clusterName));
         String jobId = ozClient.getJobId(extId.getId());
         WorkflowJob jobInfo = null;

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/webapp/src/test/java/org/apache/falcon/util/OozieTestUtils.java
----------------------------------------------------------------------
diff --git a/webapp/src/test/java/org/apache/falcon/util/OozieTestUtils.java b/webapp/src/test/java/org/apache/falcon/util/OozieTestUtils.java
index 02d1011..804b2ed 100644
--- a/webapp/src/test/java/org/apache/falcon/util/OozieTestUtils.java
+++ b/webapp/src/test/java/org/apache/falcon/util/OozieTestUtils.java
@@ -32,6 +32,7 @@ import org.apache.oozie.client.BundleJob;
 import org.apache.oozie.client.CoordinatorJob;
 import org.apache.oozie.client.Job;
 import org.apache.oozie.client.Job.Status;
+import org.apache.oozie.client.OozieClient;
 import org.apache.oozie.client.ProxyOozieClient;
 import org.apache.oozie.client.WorkflowJob;
 
@@ -49,11 +50,11 @@ public final class OozieTestUtils {
     private OozieTestUtils() {
     }
 
-    public static ProxyOozieClient getOozieClient(TestContext context) throws FalconException {
+    public static OozieClient getOozieClient(TestContext context) throws FalconException {
         return getOozieClient(context.getCluster().getCluster());
     }
 
-    public static ProxyOozieClient getOozieClient(Cluster cluster) throws FalconException {
+    public static OozieClient getOozieClient(Cluster cluster) throws FalconException {
         return OozieClientFactory.get(cluster);
     }
 
@@ -63,7 +64,7 @@ public final class OozieTestUtils {
             return bundles;
         }
 
-        ProxyOozieClient ozClient = OozieClientFactory.get(context.getCluster().getCluster());
+        OozieClient ozClient = OozieClientFactory.get(context.getCluster().getCluster());
         return ozClient.getBundleJobsInfo("name=FALCON_PROCESS_" + context.getProcessName(), 0, 10);
     }
 
@@ -72,7 +73,7 @@ public final class OozieTestUtils {
             return true;
         }
 
-        ProxyOozieClient ozClient = getOozieClient(context);
+        OozieClient ozClient = getOozieClient(context);
         List<BundleJob> bundles = getBundles(context);
         if (bundles != null) {
             for (BundleJob bundle : bundles) {
@@ -88,7 +89,7 @@ public final class OozieTestUtils {
     }
 
     public static void waitForInstanceToComplete(TestContext context, String jobId) throws Exception {
-        ProxyOozieClient ozClient = getOozieClient(context);
+        OozieClient ozClient = getOozieClient(context);
         String lastStatus = null;
         for (int i = 0; i < 50; i++) {
             WorkflowJob job = ozClient.getJobInfo(jobId);
@@ -117,7 +118,7 @@ public final class OozieTestUtils {
     }
 
     private static List<WorkflowJob> getRunningJobs(TestContext context, String entityName) throws Exception {
-        ProxyOozieClient ozClient = getOozieClient(context);
+        OozieClient ozClient = getOozieClient(context);
         return ozClient.getJobsInfo(
                 ProxyOozieClient.FILTER_STATUS + '=' + Job.Status.RUNNING + ';'
                         + ProxyOozieClient.FILTER_NAME + '=' + "FALCON_PROCESS_DEFAULT_" + entityName);
@@ -133,7 +134,7 @@ public final class OozieTestUtils {
     }
 
     public static void waitForBundleStart(TestContext context, String bundleId, Job.Status... status) throws Exception {
-        ProxyOozieClient ozClient = getOozieClient(context);
+        OozieClient ozClient = getOozieClient(context);
         Set<Job.Status> statuses = new HashSet<Job.Status>(Arrays.asList(status));
 
         Status bundleStatus = null;
@@ -162,7 +163,7 @@ public final class OozieTestUtils {
     }
 
     public static WorkflowJob getWorkflowJob(Cluster cluster, String filter) throws Exception {
-        ProxyOozieClient ozClient = getOozieClient(cluster);
+        OozieClient ozClient = getOozieClient(cluster);
 
         List<WorkflowJob> jobs;
         while (true) {


[3/3] falcon git commit: FALCON-1301 Improve documentation for Installation. Contributed by Pragya Mittal

Posted by aj...@apache.org.
FALCON-1301 Improve documentation for Installation. Contributed by Pragya Mittal


Project: http://git-wip-us.apache.org/repos/asf/falcon/repo
Commit: http://git-wip-us.apache.org/repos/asf/falcon/commit/77910aef
Tree: http://git-wip-us.apache.org/repos/asf/falcon/tree/77910aef
Diff: http://git-wip-us.apache.org/repos/asf/falcon/diff/77910aef

Branch: refs/heads/master
Commit: 77910aefd2716f22d545eeeb32a7cc8f493bc5a9
Parents: 3f00d05
Author: Ajay Yadava <aj...@gmail.com>
Authored: Tue Aug 4 17:19:36 2015 +0530
Committer: Ajay Yadava <aj...@gmail.com>
Committed: Tue Aug 4 17:19:36 2015 +0530

----------------------------------------------------------------------
 CHANGES.txt                                 |   2 +
 docs/src/site/twiki/Configuration.twiki     | 113 ++++++++
 docs/src/site/twiki/Distributed-mode.twiki  | 198 ++++++++++++++
 docs/src/site/twiki/Embedded-mode.twiki     | 198 ++++++++++++++
 docs/src/site/twiki/InstallationSteps.twiki | 326 +++--------------------
 5 files changed, 551 insertions(+), 286 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/falcon/blob/77910aef/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index e1eae4f..6148bc6 100755
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -11,6 +11,8 @@ Trunk (Unreleased)
     FALCON-796 Enable users to triage data processing issues through falcon (Ajay Yadava)
     
   IMPROVEMENTS
+    FALCON-1301 Improve documentation for Installation(Pragya Mittal via Ajay Yadava)
+
     FALCON-1322 Add prefix in runtime.properties(Sandeep Samudrala via Ajay Yadava)
 
     FALCON-1317 Inconsistent JSON serialization(Ajay Yadava)

http://git-wip-us.apache.org/repos/asf/falcon/blob/77910aef/docs/src/site/twiki/Configuration.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/Configuration.twiki b/docs/src/site/twiki/Configuration.twiki
new file mode 100644
index 0000000..37b5717
--- /dev/null
+++ b/docs/src/site/twiki/Configuration.twiki
@@ -0,0 +1,113 @@
+---+Configuring Falcon
+
+By default config directory used by falcon is {package dir}/conf. To override this (to use the same conf with multiple
+falcon upgrades), set environment variable FALCON_CONF to the path of the conf dir.
+
+falcon-env.sh has been added to the falcon conf. This file can be used to set various environment variables that you
+need for your services.
+In addition you can set any other environment variables you might need. This file will be sourced by falcon scripts
+before any commands are executed. The following environment variables are available to set.
+
+<verbatim>
+# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path
+#export JAVA_HOME=
+
+# any additional java opts you want to set. This will apply to both client and server operations
+#export FALCON_OPTS=
+
+# any additional java opts that you want to set for client only
+#export FALCON_CLIENT_OPTS=
+
+# java heap size we want to set for the client. Default is 1024MB
+#export FALCON_CLIENT_HEAP=
+
+# any additional opts you want to set for prism service.
+#export FALCON_PRISM_OPTS=
+
+# java heap size we want to set for the prism service. Default is 1024MB
+#export FALCON_PRISM_HEAP=
+
+# any additional opts you want to set for falcon service.
+#export FALCON_SERVER_OPTS=
+
+# java heap size we want to set for the falcon server. Default is 1024MB
+#export FALCON_SERVER_HEAP=
+
+# What is considered as falcon home dir. Default is the base location of the installed software
+#export FALCON_HOME_DIR=
+
+# Where log files are stored. Default is logs directory under the base install location
+#export FALCON_LOG_DIR=
+
+# Where pid files are stored. Default is logs directory under the base install location
+#export FALCON_PID_DIR=
+
+# where the falcon active mq data is stored. Default is logs/data directory under the base install location
+#export FALCON_DATA_DIR=
+
+# Where do you want to expand the war file. By Default it is in /server/webapp dir under the base install dir.
+#export FALCON_EXPANDED_WEBAPP_DIR=
+</verbatim>
+
+---++Advanced Configurations
+
+---+++Configuring Monitoring plugin to register catalog partitions
+Falcon comes with a monitoring plugin that registers catalog partition. This comes in really handy during migration from
+ filesystem based feeds to hcatalog based feeds.
+This plugin enables the user to de-couple the partition registration and assume that all partitions are already on
+hcatalog even before the migration, simplifying the hcatalog migration.
+
+By default this plugin is disabled.
+To enable this plugin and leverage the feature, there are 3 pre-requisites:
+<verbatim>
+In {package dir}/conf/startup.properties, add
+*.workflow.execution.listeners=org.apache.falcon.catalog.CatalogPartitionHandler
+
+In the cluster definition, ensure registry endpoint is defined.
+Ex:
+<interface type="registry" endpoint="thrift://localhost:1109" version="0.13.3"/>
+
+In the feed definition, ensure the corresponding catalog table is mentioned in feed-properties
+Ex:
+<properties>
+    <property name="catalog.table" value="catalog:default:in_table#year={YEAR};month={MONTH};day={DAY};hour={HOUR};
+    minute={MINUTE}"/>
+</properties>
+</verbatim>
+
+*NOTE : for Mac OS users*
+<verbatim>
+If you are using a Mac OS, you will need to configure the FALCON_SERVER_OPTS (explained above).
+
+In  {package dir}/conf/falcon-env.sh uncomment the following line
+#export FALCON_SERVER_OPTS=
+
+and change it to look as below
+export FALCON_SERVER_OPTS="-Djava.awt.headless=true -Djava.security.krb5.realm= -Djava.security.krb5.kdc="
+</verbatim>
+
+
+---+++Activemq
+
+* falcon server starts embedded active mq. To control this behaviour, set the following system properties using -D
+option in environment variable FALCON_OPTS:
+   * falcon.embeddedmq=<true/false> - Should server start embedded active mq, default true
+   * falcon.embeddedmq.port=<port> - Port for embedded active mq, default 61616
+   * falcon.embeddedmq.data=<path> - Data path for embedded active mq, default {package dir}/logs/data
+
+---+++Adding Extension Libraries
+
+Library extensions allows users to add custom libraries to entity lifecycles such as feed retention, feed replication
+and process execution. This is useful for usecases such as adding filesystem extensions. To enable this, add the
+following configs to startup.properties:
+*.libext.paths=<paths to be added to all entity lifecycles>
+
+*.libext.feed.paths=<paths to be added to all feed lifecycles>
+
+*.libext.feed.retentions.paths=<paths to be added to feed retention workflow>
+
+*.libext.feed.replication.paths=<paths to be added to feed replication workflow>
+
+*.libext.process.paths=<paths to be added to process workflow>
+
+The configured jars are added to falcon classpath and the corresponding workflows.

http://git-wip-us.apache.org/repos/asf/falcon/blob/77910aef/docs/src/site/twiki/Distributed-mode.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/Distributed-mode.twiki b/docs/src/site/twiki/Distributed-mode.twiki
new file mode 100644
index 0000000..617ab51
--- /dev/null
+++ b/docs/src/site/twiki/Distributed-mode.twiki
@@ -0,0 +1,198 @@
+---+Distributed Mode
+
+
+Following are the steps needed to package and deploy Falcon in Distributed Mode. You need to complete Steps 1-3 mentioned
+ [[InstallationSteps][here]] before proceeding further.
+
+---++Package Falcon
+Ensure that you are in the base directory (where you cloned Falcon). Let’s call it {project dir}
+
+<verbatim>
+$mvn clean assembly:assembly -DskipTests -DskipCheck=true -Pdistributed,hadoop-2
+</verbatim>
+
+
+<verbatim>
+$ls {project dir}/target/
+</verbatim>
+
+It should give an output like below :
+<verbatim>
+apache-falcon-distributed-${project.version}-server.tar.gz
+apache-falcon-distributed-${project.version}-sources.tar.gz
+archive-tmp
+maven-shared-archive-resources
+</verbatim>
+
+   * apache-falcon-distributed-${project.version}-sources.tar.gz contains source files of Falcon repo.
+
+   * apache-falcon-distributed-${project.version}-server.tar.gz package contains project artifacts along with it's
+dependencies, configuration files and scripts required to deploy Falcon.
+
+
+Tar can be found in {project dir}/target/apache-falcon-distributed-${project.version}-server.tar.gz . This is the tar
+used for installing Falcon. Lets call it {falcon package}
+
+Tar is structured as follows.
+
+<verbatim>
+
+|- bin
+   |- falcon
+   |- falcon-start
+   |- falcon-stop
+   |- falcon-status
+   |- falcon-config.sh
+   |- service-start.sh
+   |- service-stop.sh
+   |- service-status.sh
+   |- prism-stop
+   |- prism-start
+   |- prism-status
+|- conf
+   |- startup.properties
+   |- runtime.properties
+   |- client.properties
+   |- prism.keystore
+   |- log4j.xml
+   |- falcon-env.sh
+|- docs
+|- client
+   |- lib (client support libs)
+|- server
+   |- webapp
+      |- falcon.war
+      |- prism.war
+|- oozie
+   |- conf
+   |- libext
+|- hadooplibs
+|- README
+|- NOTICE.txt
+|- LICENSE.txt
+|- DISCLAIMER.txt
+|- CHANGES.txt
+</verbatim>
+
+
+---++Installing & running Falcon
+
+---+++Installing Falcon
+
+Running Falcon in distributed mode requires bringing up both prism and server. As the name suggests Falcon prism splits
+the request it gets to the Falcon servers. It is a good practice to start prism and server with their corresponding
+configurations separately. Create separate directory for prism and server. Let's call them {falcon-prism-dir} and
+{falcon-server-dir} respectively.
+
+*For prism*
+<verbatim>
+$mkdir {falcon-prism-dir}
+$tar -xzvf {falcon package}
+</verbatim>
+
+*For server*
+<verbatim>
+$mkdir {falcon-server-dir}
+$tar -xzvf {falcon package}
+</verbatim>
+
+
+---+++Starting Prism
+
+<verbatim>
+cd {falcon-prism-dir}/falcon-distributed-${project.version}
+bin/prism-start [-port <port>]
+</verbatim>
+
+By default,
+* prism server starts at port 16443. To change the port, use -port option
+
+* falcon.enableTLS can be set to true or false explicitly to enable SSL, if not port that end with 443 will
+automatically put prism on https://
+
+* prism starts with conf from {falcon-prism-dir}/falcon-distributed-${project.version}/conf. To override this (to use
+the same conf with multiple prism upgrades), set environment variable FALCON_CONF to the path of conf dir. You can find
+the instructions for configuring Falcon [[Configuration][here]].
+
+*Enabling prism-client*
+*If prism is not started using default-port 16443 then edit the following property in
+{falcon-prism-dir}/falcon-distributed-${project.version}/conf/client.properties
+falcon.url=http://{machine-ip}:{prism-port}/
+
+
+---+++Starting Falcon Server
+
+<verbatim>
+$cd {falcon-server-dir}/falcon-distributed-${project.version}
+$bin/falcon-start [-port <port>]
+</verbatim>
+
+By default,
+* If falcon.enableTLS is set to true explicitly or not set at all, Falcon starts at port 15443 on https:// by default.
+
+* If falcon.enableTLS is set to false explicitly, Falcon starts at port 15000 on http://.
+
+* To change the port, use -port option.
+
+* If falcon.enableTLS is not set explicitly, port that ends with 443 will automatically put Falcon on https://. Any
+other port will put Falcon on http://.
+
+* server starts with conf from {falcon-server-dir}/falcon-distributed-${project.version}/conf. To override this (to use
+the same conf with multiple server upgrades), set environment variable FALCON_CONF to the path of conf dir. You can find
+ the instructions for configuring Falcon [[Configuration][here]].
+
+*Enabling server-client*
+*If server is not started using default-port 15443 then edit the following property in
+{falcon-server-dir}/falcon-distributed-${project.version}/conf/client.properties. You can find the instructions for
+configuring Falcon here.
+falcon.url=http://{machine-ip}:{server-port}/
+
+*NOTE* : https is the secure version of HTTP, the protocol over which data is sent between your browser and the website
+that you are connected to. By default Falcon runs in https mode. But user can configure it to http.
+
+
+---+++Using Falcon
+
+<verbatim>
+$cd {falcon-prism-dir}/falcon-distributed-${project.version}
+$bin/falcon admin -version
+Falcon server build version: {Version:"${project.version}-SNAPSHOT-rd7e2be9afa2a5dc96acd1ec9e325f39c6b2f17f7",
+Mode:"embedded"}
+
+$bin/falcon help
+(for more details about Falcon cli usage)
+</verbatim>
+
+
+---+++Dashboard
+
+Once Falcon / prism is started, you can view the status of Falcon entities using the Web-based dashboard. You can open
+your browser at the corresponding port to use the web UI.
+
+Falcon dashboard makes the REST api calls as user "falcon-dashboard". If this user does not exist on your Falcon and
+Oozie servers, please create the user.
+
+<verbatim>
+## create user.
+[root@falconhost ~] useradd -U -m falcon-dashboard -G users
+
+## verify user is created with membership in correct groups.
+[root@falconhost ~] groups falcon-dashboard
+falcon-dashboard : falcon-dashboard users
+[root@falconhost ~]
+</verbatim>
+
+
+---+++Stopping Falcon Server
+
+<verbatim>
+$cd {falcon-server-dir}/falcon-distributed-${project.version}
+$bin/falcon-stop
+</verbatim>
+
+---+++Stopping Falcon Prism
+
+<verbatim>
+$cd {falcon-prism-dir}/falcon-distributed-${project.version}
+$bin/prism-stop
+</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/77910aef/docs/src/site/twiki/Embedded-mode.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/Embedded-mode.twiki b/docs/src/site/twiki/Embedded-mode.twiki
new file mode 100644
index 0000000..96ae8ab
--- /dev/null
+++ b/docs/src/site/twiki/Embedded-mode.twiki
@@ -0,0 +1,198 @@
+---+Embedded Mode
+
+Following are the steps needed to package and deploy Falcon in Embedded Mode. You need to complete Steps 1-3 mentioned
+ [[InstallationSteps][here]] before proceeding further.
+
+---++Package Falcon
+Ensure that you are in the base directory (where you cloned Falcon). Let’s call it {project dir}
+
+<verbatim>
+$mvn clean assembly:assembly -DskipTests -DskipCheck=true
+</verbatim>
+
+<verbatim>
+$ls {project dir}/target/
+</verbatim>
+It should give an output like below :
+<verbatim>
+apache-falcon-${project.version}-bin.tar.gz
+apache-falcon-${project.version}-sources.tar.gz
+archive-tmp
+maven-shared-archive-resources
+</verbatim>
+
+* apache-falcon-${project.version}-sources.tar.gz contains source files of Falcon repo.
+
+* apache-falcon-${project.version}-bin.tar.gz package contains project artifacts along with it's dependencies,
+configuration files and scripts required to deploy Falcon.
+
+Tar can be found in {project dir}/target/apache-falcon-${project.version}-bin.tar.gz
+
+Tar is structured as follows :
+
+<verbatim>
+
+|- bin
+   |- falcon
+   |- falcon-start
+   |- falcon-stop
+   |- falcon-status
+   |- falcon-config.sh
+   |- service-start.sh
+   |- service-stop.sh
+   |- service-status.sh
+|- conf
+   |- startup.properties
+   |- runtime.properties
+   |- prism.keystore
+   |- client.properties
+   |- log4j.xml
+   |- falcon-env.sh
+|- docs
+|- client
+   |- lib (client support libs)
+|- server
+   |- webapp
+      |- falcon.war
+|- data
+   |- falcon-store
+   |- graphdb
+   |- localhost
+|- examples
+   |- app
+      |- hive
+      |- oozie-mr
+      |- pig
+   |- data
+   |- entity
+      |- filesystem
+      |- hcat
+|- oozie
+   |- conf
+   |- libext
+|- logs
+|- hadooplibs
+|- README
+|- NOTICE.txt
+|- LICENSE.txt
+|- DISCLAIMER.txt
+|- CHANGES.txt
+</verbatim>
+
+
+---++Installing & running Falcon
+
+Running Falcon in embedded mode requires bringing up server.
+
+<verbatim>
+$tar -xzvf {falcon package}
+$cd falcon-${project.version}
+</verbatim>
+
+
+---+++Starting Falcon Server
+<verbatim>
+$cd falcon-${project.version}
+$bin/falcon-start [-port <port>]
+</verbatim>
+
+By default,
+* If falcon.enableTLS is set to true explicitly or not set at all, Falcon starts at port 15443 on https:// by default.
+
+* If falcon.enableTLS is set to false explicitly, Falcon starts at port 15000 on http://.
+
+* To change the port, use -port option.
+
+* If falcon.enableTLS is not set explicitly, port that ends with 443 will automatically put Falcon on https://. Any
+other port will put Falcon on http://.
+
+* Server starts with conf from {falcon-server-dir}/falcon-${project.version}/conf. To override this (to use
+the same conf with multiple server upgrades), set environment variable FALCON_CONF to the path of conf dir. You can find
+ the instructions for configuring Falcon [[Configuration][here]].
+
+
+---+++Enabling server-client
+If server is not started using default-port 15443 then edit the following property in
+{falcon-server-dir}/falcon-${project.version}/conf/client.properties
+
+falcon.url=http://{machine-ip}:{server-port}/
+
+
+---+++Using Falcon
+<verbatim>
+$cd falcon-${project.version}
+$bin/falcon admin -version
+Falcon server build version: {Version:"${project.version}-SNAPSHOT-rd7e2be9afa2a5dc96acd1ec9e325f39c6b2f17f7",Mode:
+"embedded",Hadoop:"${hadoop.version}"}
+
+$bin/falcon help
+(for more details about Falcon cli usage)
+</verbatim>
+
+*Note* : https is the secure version of HTTP, the protocol over which data is sent between your browser and the website
+that you are connected to. By default Falcon runs in https mode. But user can configure it to http.
+
+
+---+++Dashboard
+
+Once Falcon server is started, you can view the status of Falcon entities using the Web-based dashboard. You can open
+your browser at the corresponding port to use the web UI.
+
+Falcon dashboard makes the REST api calls as user "falcon-dashboard". If this user does not exist on your Falcon and
+Oozie servers, please create the user.
+
+<verbatim>
+## create user.
+[root@falconhost ~] useradd -U -m falcon-dashboard -G users
+
+## verify user is created with membership in correct groups.
+[root@falconhost ~] groups falcon-dashboard
+falcon-dashboard : falcon-dashboard users
+[root@falconhost ~]
+</verbatim>
+
+
+---++Running Examples using embedded package
+<verbatim>
+$cd falcon-${project.version}
+$bin/falcon-start
+</verbatim>
+Make sure the Hadoop and Oozie endpoints are according to your setup in
+examples/entity/filesystem/standalone-cluster.xml
+The cluster locations, staging and working dirs, MUST be created prior to submitting a cluster entity to Falcon.
+*staging* must have 777 permissions and the parent dirs must have execute permissions
+*working* must have 755 permissions and the parent dirs must have execute permissions
+<verbatim>
+$bin/falcon entity -submit -type cluster -file examples/entity/filesystem/standalone-cluster.xml
+</verbatim>
+Submit input and output feeds:
+<verbatim>
+$bin/falcon entity -submit -type feed -file examples/entity/filesystem/in-feed.xml
+$bin/falcon entity -submit -type feed -file examples/entity/filesystem/out-feed.xml
+</verbatim>
+Set-up workflow for the process:
+<verbatim>
+$hadoop fs -put examples/app /
+</verbatim>
+Submit and schedule the process:
+<verbatim>
+$bin/falcon entity -submitAndSchedule -type process -file examples/entity/filesystem/oozie-mr-process.xml
+$bin/falcon entity -submitAndSchedule -type process -file examples/entity/filesystem/pig-process.xml
+</verbatim>
+Generate input data:
+<verbatim>
+$examples/data/generate.sh <<hdfs endpoint>>
+</verbatim>
+Get status of instances:
+<verbatim>
+$bin/falcon instance -status -type process -name oozie-mr-process -start 2013-11-15T00:05Z -end 2013-11-15T01:00Z
+</verbatim>
+
+HCat based example entities are in examples/entity/hcat.
+
+
+---+++Stopping Falcon Server
+<verbatim>
+$cd falcon-${project.version}
+$bin/falcon-stop
+</verbatim>

http://git-wip-us.apache.org/repos/asf/falcon/blob/77910aef/docs/src/site/twiki/InstallationSteps.twiki
----------------------------------------------------------------------
diff --git a/docs/src/site/twiki/InstallationSteps.twiki b/docs/src/site/twiki/InstallationSteps.twiki
index 1dd242a..3dd034b 100644
--- a/docs/src/site/twiki/InstallationSteps.twiki
+++ b/docs/src/site/twiki/InstallationSteps.twiki
@@ -1,322 +1,76 @@
----++ Building & Installing Falcon
+---+Building & Installing Falcon
 
 
----+++ Building Falcon
+---++Building Falcon
 
-<verbatim>
-You would need the following installed to build Falcon
-
-* JDK 1.7
-* Maven 3.x
-
-git clone https://git-wip-us.apache.org/repos/asf/falcon.git falcon
-
-cd falcon
-
-export MAVEN_OPTS="-Xmx1024m -XX:MaxPermSize=256m -noverify" && mvn clean install
-
-[optionally -Dhadoop.version=<<hadoop.version>> can be appended to build for a specific version of hadoop]
-*Note:* Falcon drops support for Hadoop-1 and only supports Hadoop-2 from Falcon 0.6 onwards
-[optionally -Doozie.version=<<oozie version>> can be appended to build with a specific version of oozie.
-Oozie versions >= 4 are supported]
-Falcon build with JDK 1.7 using -noverify option
-
-</verbatim>
-
-Once the build successfully completes, artifacts can be packaged for deployment. The package can be built in embedded or distributed mode.
-
-*Embedded Mode*
-<verbatim>
-
-mvn clean assembly:assembly -DskipTests -DskipCheck=true
-
-</verbatim>
-
-Tar can be found in {project dir}/target/apache-falcon-${project.version}-bin.tar.gz
-
-Tar is structured as follows
-
-<verbatim>
-
-|- bin
-   |- falcon
-   |- falcon-start
-   |- falcon-stop
-   |- falcon-config.sh
-   |- service-start.sh
-   |- service-stop.sh
-|- conf
-   |- startup.properties
-   |- runtime.properties
-   |- client.properties
-   |- log4j.xml
-   |- falcon-env.sh
-|- docs
-|- client
-   |- lib (client support libs)
-|- server
-   |- webapp
-      |- falcon.war
-|- hadooplibs
-|- README
-|- NOTICE.txt
-|- LICENSE.txt
-|- DISCLAIMER.txt
-|- CHANGES.txt
-</verbatim>
-
-*Distributed Mode*
-
-<verbatim>
-
-mvn clean assembly:assembly -DskipTests -DskipCheck=true -Pdistributed,hadoop-2
-
-</verbatim>
-
-Tar can be found in {project dir}/target/apache-falcon-distributed-${project.version}-server.tar.gz
-
-Tar is structured as follows
-
-<verbatim>
-
-|- bin
-   |- falcon
-   |- falcon-start
-   |- falcon-stop
-   |- falcon-config.sh
-   |- service-start.sh
-   |- service-stop.sh
-   |- prism-stop
-   |- prism-start
-|- conf
-   |- startup.properties
-   |- runtime.properties
-   |- client.properties
-   |- log4j.xml
-   |- falcon-env.sh
-|- docs
-|- client
-   |- lib (client support libs)
-|- server
-   |- webapp
-      |- falcon.war
-      |- prism.war
-|- hadooplibs
-|- README
-|- NOTICE.txt
-|- LICENSE.txt
-|- DISCLAIMER.txt
-|- CHANGES.txt
-</verbatim>
-
----+++ Installing & running Falcon
-
-*Installing falcon*
-<verbatim>
-tar -xzvf {falcon package}
-cd falcon-distributed-${project.version} or falcon-${project.version}
-</verbatim>
-
-*Configuring Falcon*
-
-By default config directory used by falcon is {package dir}/conf. To override this set environment variable FALCON_CONF to the path of the conf dir.
-
-falcon-env.sh has been added to the falcon conf. This file can be used to set various environment variables that you need for you services.
-In addition you can set any other environment variables you might need. This file will be sourced by falcon scripts before any commands are executed. The following environment variables are available to set.
-
-<verbatim>
-# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path
-#export JAVA_HOME=
-
-# any additional java opts you want to set. This will apply to both client and server operations
-#export FALCON_OPTS=
-
-# any additional java opts that you want to set for client only
-#export FALCON_CLIENT_OPTS=
-
-# java heap size we want to set for the client. Default is 1024MB
-#export FALCON_CLIENT_HEAP=
+---+++Prerequisites
 
-# any additional opts you want to set for prism service.
-#export FALCON_PRISM_OPTS=
+   * JDK 1.7
+   * Maven 3.x
 
-# java heap size we want to set for the prism service. Default is 1024MB
-#export FALCON_PRISM_HEAP=
 
-# any additional opts you want to set for falcon service.
-#export FALCON_SERVER_OPTS=
 
-# java heap size we want to set for the falcon server. Default is 1024MB
-#export FALCON_SERVER_HEAP=
-
-# What is is considered as falcon home dir. Default is the base location of the installed software
-#export FALCON_HOME_DIR=
-
-# Where log files are stored. Default is logs directory under the base install location
-#export FALCON_LOG_DIR=
-
-# Where pid files are stored. Default is logs directory under the base install location
-#export FALCON_PID_DIR=
-
-# where the falcon active mq data is stored. Default is logs/data directory under the base install location
-#export FALCON_DATA_DIR=
-
-# Where do you want to expand the war file. By Default it is in /server/webapp dir under the base install dir.
-#export FALCON_EXPANDED_WEBAPP_DIR=
-</verbatim>
-
-*Configuring Monitoring plugin to register catalog partitions*
-Falcon comes with a monitoring plugin that registers catalog partition. This comes in really handy during migration from filesystem based feeds to hcatalog based feeds.
-This plugin enables the user to de-couple the partition registration and assume that all partitions are already on hcatalog even before the migration, simplifying the hcatalog migration.
-
-By default this plugin is disabled.
-To enable this plugin and leverage the feature, there are 3 pre-requisites:
+---+++Step 1 - Clone the Falcon repository
 
 <verbatim>
-In {package dir}/conf/startup.properties, add
-*.workflow.execution.listeners=org.apache.falcon.catalog.CatalogPartitionHandler
-
-In the cluster definition, ensure registry endpoint is defined.
-Ex:
-<interface type="registry" endpoint="thrift://localhost:1109" version="0.13.3"/>
-
-In the feed definition, ensure the corresponding catalog table is mentioned in feed-properties
-Ex:
-<properties>
-    <property name="catalog.table" value="catalog:default:in_table#year={YEAR};month={MONTH};day={DAY};hour={HOUR};minute={MINUTE}"/>
-</properties>
+$git clone https://git-wip-us.apache.org/repos/asf/falcon.git falcon
 </verbatim>
 
-*NOTE for Mac OS users*
-<verbatim>
-If you are using a Mac OS, you will need to configure the FALCON_SERVER_OPTS (explained above).
-
-In  {package dir}/conf/falcon-env.sh uncomment the following line
-#export FALCON_SERVER_OPTS=
 
-and change it to look as below
-export FALCON_SERVER_OPTS="-Djava.awt.headless=true -Djava.security.krb5.realm= -Djava.security.krb5.kdc="
-</verbatim>
+---+++Step 2 - Build Falcon
 
-*Starting Falcon Server*
 <verbatim>
-bin/falcon-start [-port <port>]
+$cd falcon
+$export MAVEN_OPTS="-Xmx1024m -XX:MaxPermSize=256m -noverify" && mvn clean install
 </verbatim>
+It builds and installs the package into the local repository, for use as a dependency in other projects locally.
 
-By default,
-* If falcon.enableTLS is set to true explicitly or not set at all, falcon starts at port 15443 on https:// by default.
-* If falcon.enableTLS is set to false explicitly, falcon starts at port 15000 on http://.
-* To change the port, use -port option.
-   * If falcon.enableTLS is not set explicitly, port that ends with 443 will automatically put falcon on https://. Any other port will put falcon on http://.
-* falcon server starts embedded active mq. To control this behaviour, set the following system properties using -D option in environment variable FALCON_OPTS:
-   * falcon.embeddedmq=<true/false> - Should server start embedded active mq, default true
-   * falcon.embeddedmq.port=<port> - Port for embedded active mq, default 61616
-   * falcon.embeddedmq.data=<path> - Data path for embedded active mq, default {package dir}/logs/data
-* falcon server starts with conf from {package dir}/conf. To override this (to use the same conf with multiple falcon upgrades), set environment variable FALCON_CONF to the path of conf dir
+[optionally -Dhadoop.version=<<hadoop.version>> can be appended to build for a specific version of Hadoop]
 
-__Adding Extension Libraries__
-Library extensions allows users to add custom libraries to entity lifecycles such as feed retention, feed replication and process execution. This is useful for usecases such as adding filesystem extensions. To enable this, add the following configs to startup.properties:
-*.libext.paths=<paths to be added to all entity lifecycles>
-*.libext.feed.paths=<paths to be added to all feed lifecycles>
-*.libext.feed.retentions.paths=<paths to be added to feed retention workflow>
-*.libext.feed.replication.paths=<paths to be added to feed replication workflow>
-*.libext.process.paths=<paths to be added to process workflow>
+*NOTE:* Falcon drops support for Hadoop-1 and only supports Hadoop-2 from Falcon 0.6 onwards
+[optionally -Doozie.version=<<oozie version>> can be appended to build with a specific version of Oozie. Oozie versions
+>= 4 are supported]
+NOTE: Falcon builds with JDK 1.7 using -noverify option
 
-The configured jars are added to falcon classpath and the corresponding workflows
 
 
-*Starting Prism*
-<verbatim>
-bin/prism-start [-port <port>]
-</verbatim>
+---+++Step 3 - Package and Deploy Falcon
 
-By default, 
-* prism server starts at port 16443. To change the port, use -port option
-   * falcon.enableTLS can be set to true or false explicitly to enable SSL, if not port that end with 443 will automatically put prism on https://
-* prism starts with conf from {package dir}/conf. To override this (to use the same conf with multiple prism upgrades), set environment variable FALCON_CONF to the path of conf dir
+Once the build successfully completes, artifacts can be packaged for deployment using the assembly plugin. The Assembly
+Plugin for Maven is primarily intended to allow users to aggregate the project output along with its dependencies,
+modules, site documentation, and other files into a single distributable archive. There are two basic ways in which you
+can deploy Falcon - Embedded mode (also known as Stand Alone Mode) and Distributed mode. Your next steps will vary based
+on the mode in which you want to deploy Falcon.
 
-*Using Falcon*
-<verbatim>
-bin/falcon admin -version
-Falcon server build version: {Version:"0.3-SNAPSHOT-rd7e2be9afa2a5dc96acd1ec9e325f39c6b2f17f7",Mode:"embedded"}
+*NOTE* : Oozie is being extended by Falcon (particularly on el-extensions) and hence the need for Falcon to build &
+re-package Oozie, so that users of Falcon can work with the right Oozie setup. Though Oozie is packaged by Falcon, it
+needs to be deployed separately by the administrator and is not auto deployed along with Falcon.
 
-----
 
-bin/falcon help
-(for more details about falcon cli usage)
-</verbatim>
+---++++Embedded/Stand Alone Mode
+Embedded mode is useful when the Hadoop jobs and relevant data processing involve only one Hadoop cluster. In this mode
+ there is a single Falcon server that contacts the scheduler to schedule jobs on Hadoop. All the process/feed requests
+ like submit, schedule, suspend, kill etc. are sent to this server. For running Falcon in this mode one should use the
+ Falcon which has been built using standalone option. You can find the instructions for Embedded mode setup
+ [[Embedded-mode][here]].
 
-*Dashboard*
 
-Once falcon / prism is started, you can view the status of falcon entities using the Web-based dashboard. The web UI works in both distributed and embedded mode. You can open your browser at the corresponding port to use the web UI.
-
-Falcon dashboard makes the REST api calls as user "falcon-dashboard". If this user does not exist on your falcon and oozie servers, please create the user.
-
-<verbatim>
-## create user.
-[root@falconhost ~] useradd -U -m falcon-dashboard -G users
-
-## verify user is created with membership in correct groups.
-[root@falconhost ~] groups falcon-dashboard
-falcon-dashboard : falcon-dashboard users
-[root@falconhost ~]
-</verbatim>
+---++++Distributed Mode
+Distributed mode is for multiple (colos) instances of Hadoop clusters, and multiple workflow schedulers to handle them.
+In this mode Falcon has 2 components: Prism and Server(s). Both Prism and Server(s) have their own config
+locations (startup and runtime properties). In this mode Prism acts as a contact point for Falcon servers. While
+ all commands are available through Prism, only read and instance api's are available through Server. You can find the
+ instructions for Distributed Mode setup [[Distributed-mode][here]].
 
-*Stopping Falcon Server*
-<verbatim>
-bin/falcon-stop
-</verbatim>
 
-*Stopping Prism*
-<verbatim>
-bin/prism-stop
-</verbatim>
 
----+++ Preparing Oozie and Falcon packages for deployment
+---+++Preparing Oozie and Falcon packages for deployment
 <verbatim>
-cd <<project home>>
-src/bin/package.sh <<hadoop-version>> <<oozie-version>>
+$cd <<project home>>
+$src/bin/package.sh <<hadoop-version>> <<oozie-version>>
 
 >> ex. src/bin/package.sh 1.1.2 4.0.1 or src/bin/package.sh 0.20.2-cdh3u5 4.0.1
 >> ex. src/bin/package.sh 2.5.0 4.0.0
 >> Falcon package is available in <<falcon home>>/target/apache-falcon-<<version>>-bin.tar.gz
 >> Oozie package is available in <<falcon home>>/target/oozie-4.0.1-distro.tar.gz
 </verbatim>
-
----+++ Running Examples using embedded package
-<verbatim>
-bin/falcon-start
-</verbatim>
-Make sure the hadoop and oozie endpoints are according to your setup in examples/entity/filesystem/standalone-cluster.xml
-The cluster locations,staging and working dirs, MUST be created prior to submitting a cluster entity to Falcon.
-*staging* must have 777 permissions and the parent dirs must have execute permissions
-*working* must have 755 permissions and the parent dirs must have execute permissions
-<verbatim>
-bin/falcon entity -submit -type cluster -file examples/entity/filesystem/standalone-cluster.xml
-</verbatim>
-Submit input and output feeds:
-<verbatim>
-bin/falcon entity -submit -type feed -file examples/entity/filesystem/in-feed.xml
-bin/falcon entity -submit -type feed -file examples/entity/filesystem/out-feed.xml
-</verbatim>
-Set-up workflow for the process:
-<verbatim>
-hadoop fs -put examples/app /
-</verbatim>
-Submit and schedule the process:
-<verbatim>
-bin/falcon entity -submitAndSchedule -type process -file examples/entity/filesystem/oozie-mr-process.xml
-bin/falcon entity -submitAndSchedule -type process -file examples/entity/filesystem/pig-process.xml
-</verbatim>
-Generate input data:
-<verbatim>
-examples/data/generate.sh <<hdfs endpoint>>
-</verbatim>
-Get status of instances:
-<verbatim>
-bin/falcon instance -status -type process -name oozie-mr-process -start 2013-11-15T00:05Z -end 2013-11-15T01:00Z
-</verbatim>
-
-HCat based example entities are in examples/entity/hcat.
-
-


[2/3] falcon git commit: FALCON-1297 Falcon Unit which supports Submit and Schedule of jobs. Contributed by Pavan Kumar Kolamuri.

Posted by aj...@apache.org.
FALCON-1297 Falcon Unit which supports Submit and Schedule of jobs. Contributed by Pavan Kumar Kolamuri.


Project: http://git-wip-us.apache.org/repos/asf/falcon/repo
Commit: http://git-wip-us.apache.org/repos/asf/falcon/commit/3f00d051
Tree: http://git-wip-us.apache.org/repos/asf/falcon/tree/3f00d051
Diff: http://git-wip-us.apache.org/repos/asf/falcon/diff/3f00d051

Branch: refs/heads/master
Commit: 3f00d05171e39c75713d1f6d7ff00cfbab16bf89
Parents: 5a3e1d6
Author: Ajay Yadava <aj...@gmail.com>
Authored: Tue Aug 4 17:13:47 2015 +0530
Committer: Ajay Yadava <aj...@gmail.com>
Committed: Tue Aug 4 17:13:47 2015 +0530

----------------------------------------------------------------------
 CHANGES.txt                                     |   8 +-
 .../falcon/client/AbstractFalconClient.java     |  53 +++
 .../org/apache/falcon/client/FalconClient.java  |   2 +-
 .../entity/parser/ClusterEntityParser.java      |  13 +-
 .../falcon/entity/store/ConfigurationStore.java |  54 ++-
 .../java/org/apache/falcon/util/DateUtil.java   |  39 ++
 .../falcon/workflow/util/OozieConstants.java    |  33 ++
 .../apache/falcon/hadoop/JailedFileSystem.java  |   2 +-
 .../apache/falcon/oozie/OozieEntityBuilder.java |   8 +-
 .../workflow/engine/OozieClientFactory.java     |  20 +-
 .../workflow/engine/OozieWorkflowEngine.java    |  21 +-
 .../oozie/client/LocalOozieClientBundle.java    | 382 +++++++++++++++++++
 .../oozie/client/LocalProxyOozieClient.java     | 188 +++++++++
 pom.xml                                         |   1 +
 unit/pom.xml                                    | 106 +++++
 .../java/org/apache/falcon/unit/FalconUnit.java | 215 +++++++++++
 .../apache/falcon/unit/FalconUnitClient.java    | 250 ++++++++++++
 .../apache/falcon/unit/FalconUnitHelper.java    | 100 +++++
 .../unit/LocalFalconClientProtocolProvider.java |  62 +++
 ...op.mapreduce.protocol.ClientProtocolProvider |  18 +
 unit/src/main/resources/core-site.xml           |  38 ++
 unit/src/main/resources/deploy.properties       |  21 +
 .../main/resources/localoozie-log4j.properties  |  34 ++
 unit/src/main/resources/log4j.xml               |  91 +++++
 unit/src/main/resources/mapred-site.xml         |  35 ++
 unit/src/main/resources/oozie-site.xml          | 170 +++++++++
 unit/src/main/resources/startup.properties      | 129 +++++++
 .../apache/falcon/unit/FalconUnitTestBase.java  | 317 +++++++++++++++
 .../org/apache/falcon/unit/TestFalconUnit.java  |  58 +++
 .../falcon/unit/examples/JavaExample.java       |  65 ++++
 unit/src/test/resources/cluster-template.xml    |  36 ++
 unit/src/test/resources/infeed.xml              |  39 ++
 unit/src/test/resources/input.txt               |  18 +
 unit/src/test/resources/outfeed.xml             |  39 ++
 unit/src/test/resources/process.xml             |  50 +++
 unit/src/test/resources/workflow.xml            |  43 +++
 .../falcon/resource/EntityManagerJerseyIT.java  |   6 +-
 .../resource/ProcessInstanceManagerIT.java      |   3 +-
 .../org/apache/falcon/util/OozieTestUtils.java  |  17 +-
 39 files changed, 2722 insertions(+), 62 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 50ce4d2..e1eae4f 100755
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -4,11 +4,15 @@ Trunk (Unreleased)
   INCOMPATIBLE CHANGES
 
   NEW FEATURES
+    FALCON-1297 Falcon Unit which supports Submit and Schedule of jobs(Pavan Kumar Kolamuri via Ajay Yadava)
+
     FALCON-1039 Add instance dependency API in falcon (Ajay Yadava)
 
     FALCON-796 Enable users to triage data processing issues through falcon (Ajay Yadava)
     
   IMPROVEMENTS
+    FALCON-1322 Add prefix in runtime.properties(Sandeep Samudrala via Ajay Yadava)
+
     FALCON-1317 Inconsistent JSON serialization(Ajay Yadava)
 
     FALCON-1324 Pagination API breaks backward compatibility(Ajay Yadava).
@@ -150,9 +154,7 @@ Release Version: 0.6.1
    FALCON-822 Add reverse look up API (Ajay Yadava via Suhas Vasu)
 
   IMPROVEMENTS
-   FALCON-1322 Add prefix in runtime.properties(Sandeep Samudrala via Ajay Yadava)
-
-   FALCON-1280 Update docs/license licenses with right copyright 
+   FALCON-1280 Update docs/license licenses with right copyright
    information (Shaik Idris Ali)
 
    FALCON-1276 Verify licensing in html5-ui module. 

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/client/src/main/java/org/apache/falcon/client/AbstractFalconClient.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/client/AbstractFalconClient.java b/client/src/main/java/org/apache/falcon/client/AbstractFalconClient.java
new file mode 100644
index 0000000..bb6d8c9
--- /dev/null
+++ b/client/src/main/java/org/apache/falcon/client/AbstractFalconClient.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.falcon.client;
+
+import org.apache.falcon.entity.v0.EntityType;
+import org.apache.falcon.resource.APIResult;
+
+import java.io.IOException;
+
+/**
+ * Abstract Client API to submit and manage Falcon Entities (Cluster, Feed, Process) jobs
+ * against a Falcon instance.
+ */
+public abstract class AbstractFalconClient {
+
+    /**
+     * Submit a new entity. Entities can be of type feed, process or data end
+     * points. Entity definitions are validated structurally against schema and
+     * subsequently for other rules before they are admitted into the system.
+     * @param entityType
+     * @param filePath
+     * @return
+     * @throws FalconCLIException
+     */
+    public abstract APIResult submit(String entityType, String filePath) throws FalconCLIException,
+            IOException;
+
+    /**
+     * Schedules a submitted process entity immediately.
+     * @param entityType
+     * @param entityName
+     * @param colo
+     * @return
+     * @throws FalconCLIException
+     */
+    public abstract APIResult schedule(EntityType entityType, String entityName, String colo) throws FalconCLIException;
+
+}

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/client/src/main/java/org/apache/falcon/client/FalconClient.java
----------------------------------------------------------------------
diff --git a/client/src/main/java/org/apache/falcon/client/FalconClient.java b/client/src/main/java/org/apache/falcon/client/FalconClient.java
index d507371..9649e10 100644
--- a/client/src/main/java/org/apache/falcon/client/FalconClient.java
+++ b/client/src/main/java/org/apache/falcon/client/FalconClient.java
@@ -74,7 +74,7 @@ import java.util.Properties;
  * Client API to submit and manage Falcon Entities (Cluster, Feed, Process) jobs
  * against an Falcon instance.
  */
-public class FalconClient {
+public class FalconClient extends AbstractFalconClient {
 
     public static final String WS_HEADER_PREFIX = "header:";
     public static final String USER = System.getProperty("user.name");

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/common/src/main/java/org/apache/falcon/entity/parser/ClusterEntityParser.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/parser/ClusterEntityParser.java b/common/src/main/java/org/apache/falcon/entity/parser/ClusterEntityParser.java
index 59b0910..5756f84 100644
--- a/common/src/main/java/org/apache/falcon/entity/parser/ClusterEntityParser.java
+++ b/common/src/main/java/org/apache/falcon/entity/parser/ClusterEntityParser.java
@@ -34,6 +34,7 @@ import org.apache.falcon.hadoop.HadoopClientFactory;
 import org.apache.falcon.security.SecurityUtil;
 import org.apache.falcon.util.StartupProperties;
 import org.apache.falcon.workflow.WorkflowEngineFactory;
+import org.apache.falcon.workflow.util.OozieConstants;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -46,6 +47,7 @@ import org.slf4j.LoggerFactory;
 
 import javax.jms.ConnectionFactory;
 import java.io.IOException;
+import java.net.URI;
 
 /**
  * Parser that parses cluster entity definition.
@@ -92,7 +94,12 @@ public class ClusterEntityParser extends EntityParser<Cluster> {
     private void validateScheme(Cluster cluster, Interfacetype interfacetype)
         throws ValidationException {
         final String endpoint = ClusterHelper.getInterface(cluster, interfacetype).getEndpoint();
-        if (new Path(endpoint).toUri().getScheme() == null) {
+        URI uri = new Path(endpoint).toUri();
+        if (uri.getScheme() == null) {
+            if (Interfacetype.WORKFLOW == interfacetype
+                    && uri.toString().equals(OozieConstants.LOCAL_OOZIE)) {
+                return;
+            }
             throw new ValidationException("Cannot get valid scheme for interface: "
                     + interfacetype + " of cluster: " + cluster.getName());
         }
@@ -146,7 +153,9 @@ public class ClusterEntityParser extends EntityParser<Cluster> {
     protected void validateWorkflowInterface(Cluster cluster) throws ValidationException {
         final String workflowUrl = ClusterHelper.getOozieUrl(cluster);
         LOG.info("Validating workflow interface: {}", workflowUrl);
-
+        if (OozieConstants.LOCAL_OOZIE.equals(workflowUrl)) {
+            return;
+        }
         try {
             if (!WorkflowEngineFactory.getWorkflowEngine().isAlive(cluster)) {
                 throw new ValidationException("Unable to reach Workflow server:" + workflowUrl);

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/common/src/main/java/org/apache/falcon/entity/store/ConfigurationStore.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/entity/store/ConfigurationStore.java b/common/src/main/java/org/apache/falcon/entity/store/ConfigurationStore.java
index b5f531a..7b53ebb 100644
--- a/common/src/main/java/org/apache/falcon/entity/store/ConfigurationStore.java
+++ b/common/src/main/java/org/apache/falcon/entity/store/ConfigurationStore.java
@@ -61,6 +61,7 @@ public final class ConfigurationStore implements FalconService {
     private static final Logger LOG = LoggerFactory.getLogger(ConfigurationStore.class);
     private static final Logger AUDIT = LoggerFactory.getLogger("AUDIT");
     private static final String UTF_8 = CharEncoding.UTF_8;
+    private final boolean shouldPersist;
 
     private static final FsPermission STORE_PERMISSION =
             new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
@@ -93,17 +94,20 @@ public final class ConfigurationStore implements FalconService {
         return STORE;
     }
 
-    private final FileSystem fs;
-    private final Path storePath;
+    private FileSystem fs;
+    private Path storePath;
 
     private ConfigurationStore() {
         for (EntityType type : EntityType.values()) {
             dictionary.put(type, new ConcurrentHashMap<String, Entity>());
         }
 
-        String uri = StartupProperties.get().getProperty("config.store.uri");
-        storePath = new Path(uri);
-        fs = initializeFileSystem();
+        shouldPersist = Boolean.parseBoolean(StartupProperties.get().getProperty("config.store.persist", "true"));
+        if (shouldPersist) {
+            String uri = StartupProperties.get().getProperty("config.store.uri");
+            storePath = new Path(uri);
+            fs = initializeFileSystem();
+        }
     }
 
     /**
@@ -140,24 +144,26 @@ public final class ConfigurationStore implements FalconService {
             registerListener(listener);
         }
 
-        try {
-            for (EntityType type : ENTITY_LOAD_ORDER) {
-                ConcurrentHashMap<String, Entity> entityMap = dictionary.get(type);
-                FileStatus[] files = fs.globStatus(new Path(storePath, type.name() + Path.SEPARATOR + "*"));
-                if (files != null) {
-                    for (FileStatus file : files) {
-                        String fileName = file.getPath().getName();
-                        String encodedEntityName = fileName.substring(0, fileName.length() - 4); // drop
-                        // ".xml"
-                        String entityName = URLDecoder.decode(encodedEntityName, UTF_8);
-                        Entity entity = restore(type, entityName);
-                        entityMap.put(entityName, entity);
-                        onReload(entity);
+        if (shouldPersist) {
+            try {
+                for (EntityType type : ENTITY_LOAD_ORDER) {
+                    ConcurrentHashMap<String, Entity> entityMap = dictionary.get(type);
+                    FileStatus[] files = fs.globStatus(new Path(storePath, type.name() + Path.SEPARATOR + "*"));
+                    if (files != null) {
+                        for (FileStatus file : files) {
+                            String fileName = file.getPath().getName();
+                            String encodedEntityName = fileName.substring(0, fileName.length() - 4); // drop
+                            // ".xml"
+                            String entityName = URLDecoder.decode(encodedEntityName, UTF_8);
+                            Entity entity = restore(type, entityName);
+                            entityMap.put(entityName, entity);
+                            onReload(entity);
+                        }
                     }
                 }
+            } catch (IOException e) {
+                throw new FalconException("Unable to restore configurations", e);
             }
-        } catch (IOException e) {
-            throw new FalconException("Unable to restore configurations", e);
         }
     }
 
@@ -261,7 +267,7 @@ public final class ConfigurationStore implements FalconService {
                 return (T) updatesInProgress.get();
             }
             T entity = (T) entityMap.get(name);
-            if (entity == NULL) { // Object equality being checked
+            if (entity == NULL && shouldPersist) { // Object equality being checked
                 try {
                     entity = this.restore(type, name);
                 } catch (IOException e) {
@@ -322,6 +328,9 @@ public final class ConfigurationStore implements FalconService {
      * @throws FalconException
      */
     private void persist(EntityType type, Entity entity) throws IOException, FalconException {
+        if (!shouldPersist) {
+            return;
+        }
         OutputStream out = fs
                 .create(new Path(storePath,
                         type + Path.SEPARATOR + URLEncoder.encode(entity.getName(), UTF_8) + ".xml"));
@@ -344,6 +353,9 @@ public final class ConfigurationStore implements FalconService {
      * @throws IOException If any error in accessing the storage
      */
     private void archive(EntityType type, String name) throws IOException {
+        if (!shouldPersist) {
+            return;
+        }
         Path archivePath = new Path(storePath, "archive" + Path.SEPARATOR + type);
         HadoopClientFactory.mkdirs(fs, archivePath, STORE_PERMISSION);
         fs.rename(new Path(storePath, type + Path.SEPARATOR + URLEncoder.encode(name, UTF_8) + ".xml"),

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/common/src/main/java/org/apache/falcon/util/DateUtil.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/util/DateUtil.java b/common/src/main/java/org/apache/falcon/util/DateUtil.java
new file mode 100644
index 0000000..e736340
--- /dev/null
+++ b/common/src/main/java/org/apache/falcon/util/DateUtil.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.util;
+
+import java.util.Calendar;
+import java.util.Date;
+import java.util.TimeZone;
+
+/**
+ * Helper to get date operations.
+ */
+public final class DateUtil {
+
+    private DateUtil() {}
+
+    public static Date getNextMinute(Date time) throws Exception {
+        Calendar insCal = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
+        insCal.setTime(time);
+
+        insCal.add(Calendar.MINUTE, 1);
+        return insCal.getTime();
+    }
+}

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/common/src/main/java/org/apache/falcon/workflow/util/OozieConstants.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/falcon/workflow/util/OozieConstants.java b/common/src/main/java/org/apache/falcon/workflow/util/OozieConstants.java
new file mode 100644
index 0000000..05f248e
--- /dev/null
+++ b/common/src/main/java/org/apache/falcon/workflow/util/OozieConstants.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.falcon.workflow.util;
+
+/**
+ * Oozie Constants used across multiple modules.
+ */
+public final class OozieConstants {
+    /**
+     * Constant for the oozie running in local.
+     */
+    public static final String LOCAL_OOZIE = "localoozie";
+
+    private OozieConstants() {
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/hadoop-dependencies/src/main/java/org/apache/falcon/hadoop/JailedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-dependencies/src/main/java/org/apache/falcon/hadoop/JailedFileSystem.java b/hadoop-dependencies/src/main/java/org/apache/falcon/hadoop/JailedFileSystem.java
index 7156bbd..d5b2eb3 100644
--- a/hadoop-dependencies/src/main/java/org/apache/falcon/hadoop/JailedFileSystem.java
+++ b/hadoop-dependencies/src/main/java/org/apache/falcon/hadoop/JailedFileSystem.java
@@ -58,7 +58,7 @@ public class JailedFileSystem extends FileSystem {
             throw new IOException("Incomplete Jail URI, no jail base: "+ name);
         }
         basePath = new Path(conf.get("jail.base", System.getProperty("hadoop.tmp.dir",
-                        System.getProperty("user.dir") + "/webapp/target/tmp-hadoop-"
+                        System.getProperty("user.dir") + "/target/falcon/tmp-hadoop-"
                                 + System.getProperty("user.name"))) + "/jail-fs/" + base).toUri().getPath();
         this.uri = URI.create(name.getScheme()+"://"+name.getAuthority());
     }

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/oozie/src/main/java/org/apache/falcon/oozie/OozieEntityBuilder.java
----------------------------------------------------------------------
diff --git a/oozie/src/main/java/org/apache/falcon/oozie/OozieEntityBuilder.java b/oozie/src/main/java/org/apache/falcon/oozie/OozieEntityBuilder.java
index 9ca0ac1..9a6b14c 100644
--- a/oozie/src/main/java/org/apache/falcon/oozie/OozieEntityBuilder.java
+++ b/oozie/src/main/java/org/apache/falcon/oozie/OozieEntityBuilder.java
@@ -26,6 +26,7 @@ import org.apache.falcon.entity.ClusterHelper;
 import org.apache.falcon.entity.v0.Entity;
 import org.apache.falcon.entity.v0.cluster.Cluster;
 import org.apache.falcon.entity.v0.cluster.ClusterLocationType;
+import org.apache.falcon.entity.v0.cluster.Interfacetype;
 import org.apache.falcon.entity.v0.cluster.Property;
 import org.apache.falcon.entity.v0.feed.Feed;
 import org.apache.falcon.entity.v0.process.Output;
@@ -38,6 +39,7 @@ import org.apache.falcon.service.FalconPathFilter;
 import org.apache.falcon.service.SharedLibraryHostingService;
 import org.apache.falcon.util.StartupProperties;
 import org.apache.falcon.workflow.engine.AbstractWorkflowEngine;
+import org.apache.falcon.workflow.util.OozieConstants;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -168,8 +170,10 @@ public abstract class OozieEntityBuilder<T extends Entity> {
         properties.setProperty(AbstractWorkflowEngine.NAME_NODE, ClusterHelper.getStorageUrl(cluster));
         properties.setProperty(AbstractWorkflowEngine.JOB_TRACKER, ClusterHelper.getMREndPoint(cluster));
         properties.setProperty("colo.name", cluster.getColo());
-
-        properties.setProperty(OozieClient.USE_SYSTEM_LIBPATH, "true");
+        final String endpoint = ClusterHelper.getInterface(cluster, Interfacetype.WORKFLOW).getEndpoint();
+        if (!OozieConstants.LOCAL_OOZIE.equals(endpoint)) {
+            properties.setProperty(OozieClient.USE_SYSTEM_LIBPATH, "true");
+        }
         properties.setProperty("falcon.libpath",
                 ClusterHelper.getLocation(cluster, ClusterLocationType.WORKING).getPath()  + "/lib");
 

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/oozie/src/main/java/org/apache/falcon/workflow/engine/OozieClientFactory.java
----------------------------------------------------------------------
diff --git a/oozie/src/main/java/org/apache/falcon/workflow/engine/OozieClientFactory.java b/oozie/src/main/java/org/apache/falcon/workflow/engine/OozieClientFactory.java
index 622238a..ae5c5fa 100644
--- a/oozie/src/main/java/org/apache/falcon/workflow/engine/OozieClientFactory.java
+++ b/oozie/src/main/java/org/apache/falcon/workflow/engine/OozieClientFactory.java
@@ -23,7 +23,11 @@ import org.apache.falcon.entity.ClusterHelper;
 import org.apache.falcon.entity.store.ConfigurationStore;
 import org.apache.falcon.entity.v0.EntityType;
 import org.apache.falcon.entity.v0.cluster.Cluster;
+import org.apache.falcon.workflow.util.OozieConstants;
+import org.apache.oozie.client.LocalProxyOozieClient;
+import org.apache.oozie.client.OozieClient;
 import org.apache.oozie.client.ProxyOozieClient;
+import org.apache.oozie.local.LocalOozie;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -33,13 +37,12 @@ import org.slf4j.LoggerFactory;
 public final class OozieClientFactory {
 
     private static final Logger LOG = LoggerFactory.getLogger(OozieClientFactory.class);
-    private static final String LOCAL_OOZIE = "local";
 
     private static volatile boolean localInitialized = false;
 
     private OozieClientFactory() {}
 
-    public static synchronized ProxyOozieClient get(Cluster cluster)
+    public static synchronized OozieClient get(Cluster cluster)
         throws FalconException {
 
         assert cluster != null : "Cluster cant be null";
@@ -48,28 +51,27 @@ public final class OozieClientFactory {
         return getClientRef(oozieUrl);
     }
 
-    public static ProxyOozieClient get(String clusterName) throws FalconException {
+    public static OozieClient get(String clusterName) throws FalconException {
         return get((Cluster) ConfigurationStore.get().get(EntityType.CLUSTER, clusterName));
     }
 
-    private static ProxyOozieClient getClientRef(String oozieUrl)
+    private static OozieClient getClientRef(String oozieUrl)
         throws FalconException {
 
-        if (LOCAL_OOZIE.equals(oozieUrl)) {
+        if (OozieConstants.LOCAL_OOZIE.equals(oozieUrl)) {
             return getLocalOozieClient();
         } else {
             return new ProxyOozieClient(oozieUrl);
         }
     }
 
-    private static ProxyOozieClient getLocalOozieClient() throws FalconException {
+    private static OozieClient getLocalOozieClient() throws FalconException {
         try {
             if (!localInitialized) {
-                //LocalOozie.start();
+                LocalOozie.start();
                 localInitialized = true;
             }
-            //return LocalOozie.getClient();
-            return null;
+            return new LocalProxyOozieClient();
         } catch (Exception e) {
             throw new FalconException(e);
         }

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/oozie/src/main/java/org/apache/falcon/workflow/engine/OozieWorkflowEngine.java
----------------------------------------------------------------------
diff --git a/oozie/src/main/java/org/apache/falcon/workflow/engine/OozieWorkflowEngine.java b/oozie/src/main/java/org/apache/falcon/workflow/engine/OozieWorkflowEngine.java
index 4085b8f..2f3dc6f 100644
--- a/oozie/src/main/java/org/apache/falcon/workflow/engine/OozieWorkflowEngine.java
+++ b/oozie/src/main/java/org/apache/falcon/workflow/engine/OozieWorkflowEngine.java
@@ -58,7 +58,6 @@ import org.apache.oozie.client.Job;
 import org.apache.oozie.client.Job.Status;
 import org.apache.oozie.client.OozieClient;
 import org.apache.oozie.client.OozieClientException;
-import org.apache.oozie.client.ProxyOozieClient;
 import org.apache.oozie.client.WorkflowAction;
 import org.apache.oozie.client.WorkflowJob;
 import org.apache.oozie.client.rest.RestConstants;
@@ -210,7 +209,7 @@ public class OozieWorkflowEngine extends AbstractWorkflowEngine {
 
     private void dryRunInternal(Cluster cluster, Path buildPath) throws FalconException {
         BUNDLEAPP bundle = OozieBundleBuilder.unmarshal(cluster, buildPath);
-        ProxyOozieClient client = OozieClientFactory.get(cluster.getName());
+        OozieClient client = OozieClientFactory.get(cluster.getName());
         for (COORDINATOR coord : bundle.getCoordinator()) {
             Properties props = new Properties();
             props.setProperty(OozieClient.COORDINATOR_APP_PATH, coord.getAppPath());
@@ -396,7 +395,7 @@ public class OozieWorkflowEngine extends AbstractWorkflowEngine {
     }
 
     private void killBundle(String clusterName, BundleJob job) throws FalconException {
-        ProxyOozieClient client = OozieClientFactory.get(clusterName);
+        OozieClient client = OozieClientFactory.get(clusterName);
         try {
             //kill all coords
             for (CoordinatorJob coord : job.getCoordinators()) {
@@ -459,7 +458,7 @@ public class OozieWorkflowEngine extends AbstractWorkflowEngine {
             List<Instance> runInstances = new ArrayList<Instance>();
 
             for (String cluster : clusters) {
-                ProxyOozieClient client = OozieClientFactory.get(cluster);
+                OozieClient client = OozieClientFactory.get(cluster);
                 List<String> wfNames = EntityUtil.getWorkflowNames(entity);
                 List<WorkflowJob> wfs = getRunningWorkflows(cluster, wfNames);
                 if (wfs != null) {
@@ -615,7 +614,7 @@ public class OozieWorkflowEngine extends AbstractWorkflowEngine {
             }
 
             List<BundleJob> bundles = entry.getValue();
-            ProxyOozieClient client = OozieClientFactory.get(cluster);
+            OozieClient client = OozieClientFactory.get(cluster);
             List<CoordinatorJob> applicableCoords = getApplicableCoords(client, start, end,
                     bundles, lifeCycles);
             long unscheduledInstances = 0;
@@ -901,7 +900,7 @@ public class OozieWorkflowEngine extends AbstractWorkflowEngine {
         for (Map.Entry<String, List<BundleJob>> entry : bundlesMap.entrySet()) {
             String cluster = entry.getKey();
             List<BundleJob> bundles = entry.getValue();
-            ProxyOozieClient client = OozieClientFactory.get(cluster);
+            OozieClient client = OozieClientFactory.get(cluster);
             List<CoordinatorJob> applicableCoords =
                 getApplicableCoords(client, start, end, bundles, lifeCycles);
             List<CoordinatorAction> actions = new ArrayList<CoordinatorAction>();
@@ -947,7 +946,7 @@ public class OozieWorkflowEngine extends AbstractWorkflowEngine {
         return coord.getAppName().contains(LifeCycle.EVICTION.getTag().name());
     }
 
-    private void addCoordAction(ProxyOozieClient client, List<CoordinatorAction> actions, String actionId) {
+    private void addCoordAction(OozieClient client, List<CoordinatorAction> actions, String actionId) {
         CoordinatorAction coordActionInfo = null;
         try {
             coordActionInfo = client.getCoordActionInfo(actionId);
@@ -984,7 +983,7 @@ public class OozieWorkflowEngine extends AbstractWorkflowEngine {
         }
     }
 
-    private List<CoordinatorJob> getApplicableCoords(ProxyOozieClient client, Date start, Date end,
+    private List<CoordinatorJob> getApplicableCoords(OozieClient client, Date start, Date end,
                                                      List<BundleJob> bundles,
                                                      List<LifeCycle> lifeCycles) throws FalconException {
         List<CoordinatorJob> applicableCoords = new ArrayList<CoordinatorJob>();
@@ -1323,7 +1322,7 @@ public class OozieWorkflowEngine extends AbstractWorkflowEngine {
     @Override
     public void reRun(String cluster, String jobId, Properties props, boolean isForced) throws FalconException {
 
-        ProxyOozieClient client = OozieClientFactory.get(cluster);
+        OozieClient client = OozieClientFactory.get(cluster);
         try {
             WorkflowJob jobInfo = client.getJobInfo(jobId);
             Properties jobprops = OozieUtils.toProperties(jobInfo.getConf());
@@ -1385,7 +1384,7 @@ public class OozieWorkflowEngine extends AbstractWorkflowEngine {
     @Override
     public String getWorkflowStatus(String cluster, String jobId) throws FalconException {
 
-        ProxyOozieClient client = OozieClientFactory.get(cluster);
+        OozieClient client = OozieClientFactory.get(cluster);
         try {
             if (jobId.endsWith("-W")) {
                 WorkflowJob jobInfo = client.getJobInfo(jobId);
@@ -1489,7 +1488,7 @@ public class OozieWorkflowEngine extends AbstractWorkflowEngine {
 
         // assert that its really changed
         try {
-            ProxyOozieClient client = OozieClientFactory.get(cluster);
+            OozieClient client = OozieClientFactory.get(cluster);
             CoordinatorJob coord = client.getCoordJobInfo(id);
             for (int counter = 0; counter < 3; counter++) {
                 Date intendedPauseTime = (StringUtils.isEmpty(pauseTime) ? null : SchemaHelper.parseDateUTC(pauseTime));

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/oozie/src/main/java/org/apache/oozie/client/LocalOozieClientBundle.java
----------------------------------------------------------------------
diff --git a/oozie/src/main/java/org/apache/oozie/client/LocalOozieClientBundle.java b/oozie/src/main/java/org/apache/oozie/client/LocalOozieClientBundle.java
new file mode 100644
index 0000000..93b4337
--- /dev/null
+++ b/oozie/src/main/java/org/apache/oozie/client/LocalOozieClientBundle.java
@@ -0,0 +1,382 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.oozie.client;
+
+import org.apache.oozie.BaseEngineException;
+import org.apache.oozie.BundleEngine;
+import org.apache.oozie.BundleEngineException;
+import org.apache.oozie.BundleJobBean;
+import org.apache.oozie.BundleJobInfo;
+import org.apache.oozie.ErrorCode;
+import org.apache.oozie.util.XConfiguration;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+
+
+/**
+ * Client API to submit and manage Oozie bundle jobs against an Oozie
+ * intance.
+ */
+public class LocalOozieClientBundle extends OozieClient {
+
+    private final BundleEngine bundleEngine;
+
+    /**
+     * Create a bundle client for Oozie local use.
+     * <p/>
+     *
+     * @param bundleEngine the engine instance to use.
+     */
+    public LocalOozieClientBundle(BundleEngine bundleEngine) {
+        this.bundleEngine = bundleEngine;
+    }
+
+    /**
+     * Return the Oozie URL of the bundle client instance.
+     * <p/>
+     * This URL is the base URL for the Oozie system, without protocol
+     * versioning.
+     *
+     * @return the Oozie URL of the bundle client instance.
+     */
+    @Override
+    public String getOozieUrl() {
+        return "localoozie";
+    }
+
+    /**
+     * Return the Oozie URL used by the client and server for WS communications.
+     * <p/>
+     * This URL is the original URL plus the versioning element path.
+     *
+     * @return the Oozie URL used by the client and server for communication.
+     * @throws OozieClientException thrown if the client
+     *                              and the server are not protocol compatible.
+     */
+    @Override
+    public String getProtocolUrl() throws OozieClientException {
+        return "localoozie";
+    }
+
+    /**
+     * Validate that the Oozie client and server instances are protocol
+     * compatible.
+     *
+     * @throws OozieClientException thrown if the client
+     *                              and the server are not protocol compatible.
+     */
+    @Override
+    public synchronized void validateWSVersion() throws OozieClientException {
+    }
+
+    /**
+     * Create an empty configuration with just the {@link #USER_NAME} set to the
+     * JVM user name and the {@link #GROUP_NAME} set to 'other'.
+     *
+     * @return an empty configuration.
+     */
+    @Override
+    public Properties createConfiguration() {
+        Properties conf = new Properties();
+        if (bundleEngine != null) {
+            conf.setProperty(USER_NAME, bundleEngine.getUser());
+        }
+        return conf;
+    }
+
+    /**
+     * Set a HTTP header to be used in the WS requests by the bundle
+     * instance.
+     *
+     * @param name  header name.
+     * @param value header value.
+     */
+    @Override
+    public void setHeader(String name, String value) {
+    }
+
+    /**
+     * Get the value of a set HTTP header from the bundle instance.
+     *
+     * @param name header name.
+     * @return header value, <code>null</code> if not set.
+     */
+    @Override
+    public String getHeader(String name) {
+        return null;
+    }
+
+    /**
+     * Remove a HTTP header from the bundle client instance.
+     *
+     * @param name header name.
+     */
+    @Override
+    public void removeHeader(String name) {
+    }
+
+    /**
+     * Return an iterator with all the header names set in the bundle
+     * instance.
+     *
+     * @return header names.
+     */
+    @Override
+    @SuppressWarnings("unchecked")
+    public Iterator<String> getHeaderNames() {
+        return Collections.EMPTY_SET.iterator();
+    }
+
+    /**
+     * Submit a bundle job.
+     *
+     * @param conf job configuration.
+     * @return the job Id.
+     * @throws OozieClientException thrown if the job
+     *                              could not be submitted.
+     */
+    @Override
+    public String submit(Properties conf) throws OozieClientException {
+        try {
+            return bundleEngine.submitJob(new XConfiguration(conf), false);
+        } catch (BundleEngineException ex) {
+            throw new OozieClientException(ex.getErrorCode().toString(), ex);
+        }
+    }
+
+    /**
+     * Start a bundle job.
+     *
+     * @param jobId job Id.
+     * @throws OozieClientException thrown if the job
+     *                              could not be started.
+     */
+    @Override
+    @Deprecated
+    public void start(String jobId) throws OozieClientException {
+        try {
+            bundleEngine.start(jobId);
+        } catch (BundleEngineException ex) {
+            throw new OozieClientException(ex.getErrorCode().toString(), ex);
+        } catch (BaseEngineException bex) {
+            throw new OozieClientException(bex.getErrorCode().toString(), bex);
+        }
+    }
+
+    /**
+     * Submit and start a bundle job.
+     *
+     * @param conf job configuration.
+     * @return the job Id.
+     * @throws OozieClientException thrown if the job
+     *                              could not be submitted.
+     */
+    @Override
+    public String run(Properties conf) throws OozieClientException {
+        try {
+            return bundleEngine.submitJob(new XConfiguration(conf), true);
+        } catch (BundleEngineException ex) {
+            throw new OozieClientException(ex.getErrorCode().toString(), ex);
+        }
+    }
+
+    /**
+     * Rerun a workflow job.
+     *
+     * @param jobId job Id to rerun.
+     * @param conf  configuration information for the rerun.
+     * @throws OozieClientException thrown if the job
+     *                              could not be started.
+     */
+    @Override
+    @Deprecated
+    public void reRun(String jobId, Properties conf) throws OozieClientException {
+        throw new OozieClientException(ErrorCode.E0301.toString(), "no-op");
+    }
+
+    /**
+     * Rerun bundle coordinators.
+     *
+     * @param jobId      bundle jobId
+     * @param coordScope rerun scope for coordinator jobs
+     * @param dateScope  rerun scope for date
+     * @param refresh    true if -refresh is given in command option
+     * @param noCleanup  true if -nocleanup is given in command option
+     * @throws OozieClientException
+     */
+    @Override
+    public Void reRunBundle(String jobId, String coordScope, String dateScope, boolean refresh,
+                            boolean noCleanup) throws OozieClientException {
+        try {
+            new BundleEngine().reRun(jobId, coordScope, dateScope, refresh, noCleanup);
+        } catch (BaseEngineException e) {
+            throw new OozieClientException(e.getErrorCode().toString(), e);
+        }
+        return null;
+    }
+
+    /**
+     * Suspend a bundle job.
+     *
+     * @param jobId job Id.
+     * @throws OozieClientException thrown if the job
+     *                              could not be suspended.
+     */
+    @Override
+    public void suspend(String jobId) throws OozieClientException {
+        try {
+            bundleEngine.suspend(jobId);
+        } catch (BundleEngineException ex) {
+            throw new OozieClientException(ex.getErrorCode().toString(), ex);
+        }
+    }
+
+    /**
+     * Resume a bundle job.
+     *
+     * @param jobId job Id.
+     * @throws OozieClientException thrown if the job
+     *                              could not be resume.
+     */
+    @Override
+    public void resume(String jobId) throws OozieClientException {
+        try {
+            bundleEngine.resume(jobId);
+        } catch (BundleEngineException ex) {
+            throw new OozieClientException(ex.getErrorCode().toString(), ex);
+        }
+    }
+
+    /**
+     * Kill a bundle job.
+     *
+     * @param jobId job Id.
+     * @throws OozieClientException thrown if the job
+     *                              could not be killed.
+     */
+    @Override
+    public void kill(String jobId) throws OozieClientException {
+        try {
+            bundleEngine.kill(jobId);
+        } catch (BundleEngineException ex) {
+            throw new OozieClientException(ex.getErrorCode().toString(), ex);
+        }
+    }
+
+    /**
+     * Get the info of a workflow job.
+     *
+     * @param jobId job Id.
+     * @return the job info.
+     * @throws OozieClientException thrown if the job
+     *                              info could not be retrieved.
+     */
+    @Override
+    @Deprecated
+    public WorkflowJob getJobInfo(String jobId) throws OozieClientException {
+        throw new OozieClientException(ErrorCode.E0301.toString(), "no-op");
+    }
+
+    /**
+     * Get the info of a bundle job.
+     *
+     * @param jobId job Id.
+     * @return the job info.
+     * @throws OozieClientException thrown if the job
+     *                              info could not be retrieved.
+     */
+    @Override
+    public BundleJob getBundleJobInfo(String jobId) throws OozieClientException {
+        try {
+            return bundleEngine.getBundleJob(jobId);
+        } catch (BundleEngineException ex) {
+            throw new OozieClientException(ex.getErrorCode().toString(), ex);
+        } catch (BaseEngineException bex) {
+            throw new OozieClientException(bex.getErrorCode().toString(), bex);
+        }
+    }
+
+    /**
+     * Return the info of the workflow jobs that match the filter.
+     *
+     * @param filter job filter. Refer to the {@link OozieClient} for the filter
+     *               syntax.
+     * @param start  jobs offset, base 1.
+     * @param len    number of jobs to return.
+     * @return a list with the workflow jobs info, without node details.
+     * @throws OozieClientException thrown if the jobs info could not be
+     *                              retrieved.
+     */
+    @Override
+    @Deprecated
+    public List<WorkflowJob> getJobsInfo(String filter, int start, int len) throws OozieClientException {
+        throw new OozieClientException(ErrorCode.E0301.toString(), "no-op");
+    }
+
+    /**
+     * Return the info of the bundle jobs that match the filter.
+     *
+     * @param filter job filter. Refer to the {@link OozieClient} for the filter
+     *               syntax.
+     * @param start  jobs offset, base 1.
+     * @param len    number of jobs to return.
+     * @return a list with the coordinator jobs info
+     * @throws OozieClientException thrown if the jobs info could not be
+     *                              retrieved.
+     */
+    @Override
+    public List<BundleJob> getBundleJobsInfo(String filter, int start, int len) throws OozieClientException {
+        try {
+            start = (start < 1) ? 1 : start; // taken from oozie API
+            len = (len < 1) ? 50 : len;
+            BundleJobInfo info = bundleEngine.getBundleJobs(filter, start, len);
+            List<BundleJob> jobs = new ArrayList<BundleJob>();
+            List<BundleJobBean> jobBeans = info.getBundleJobs();
+            for (BundleJobBean jobBean : jobBeans) {
+                jobs.add(jobBean);
+            }
+            return jobs;
+
+        } catch (BundleEngineException ex) {
+            throw new OozieClientException(ex.getErrorCode().toString(), ex);
+        }
+    }
+
+    /**
+     * Return the info of the workflow jobs that match the filter.
+     * <p/>
+     * It returns the first 100 jobs that match the filter.
+     *
+     * @param filter job filter. Refer to the {@link org.apache.oozie.LocalOozieClient} for the
+     *               filter syntax.
+     * @return a list with the workflow jobs info, without node details.
+     * @throws OozieClientException thrown if the jobs
+     *                              info could not be retrieved.
+     */
+    @Override
+    @Deprecated
+    public List<WorkflowJob> getJobsInfo(String filter) throws OozieClientException {
+        throw new OozieClientException(ErrorCode.E0301.toString(), "no-op");
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/oozie/src/main/java/org/apache/oozie/client/LocalProxyOozieClient.java
----------------------------------------------------------------------
diff --git a/oozie/src/main/java/org/apache/oozie/client/LocalProxyOozieClient.java b/oozie/src/main/java/org/apache/oozie/client/LocalProxyOozieClient.java
new file mode 100644
index 0000000..217cec9
--- /dev/null
+++ b/oozie/src/main/java/org/apache/oozie/client/LocalProxyOozieClient.java
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.oozie.client;
+
+import org.apache.oozie.BundleEngine;
+import org.apache.oozie.LocalOozieClient;
+import org.apache.oozie.LocalOozieClientCoord;
+import org.apache.oozie.local.LocalOozie;
+import org.apache.oozie.service.BundleEngineService;
+import org.apache.oozie.service.Services;
+
+import java.io.PrintStream;
+import java.util.List;
+import java.util.Properties;
+
+/**
+ * Oozie Client for Local Oozie.
+ */
+public class LocalProxyOozieClient extends OozieClient {
+
+    private static LocalOozieClientBundle localOozieClientBundle;
+    private static LocalOozieClientCoord localOozieClientCoord;
+    private static LocalOozieClient localOozieClient;
+    private static final BundleEngine BUNDLE_ENGINE = Services.get().
+            get(BundleEngineService.class).getBundleEngine(System.getProperty("user.name"));
+
+
+    private LocalOozieClientBundle getLocalOozieClientBundle() {
+        if (localOozieClientBundle == null) {
+            localOozieClientBundle = new LocalOozieClientBundle(BUNDLE_ENGINE);
+        }
+        return localOozieClientBundle;
+    }
+
+    private LocalOozieClient getLocalOozieClient() {
+        if (localOozieClient == null) {
+            localOozieClient = (LocalOozieClient) LocalOozie.getClient();
+        }
+        return localOozieClient;
+    }
+
+    private LocalOozieClientCoord getLocalOozieClientCoord() {
+        if (localOozieClientCoord == null) {
+            localOozieClientCoord = (LocalOozieClientCoord) LocalOozie.getCoordClient();
+        }
+        return localOozieClientCoord;
+    }
+
+    @Override
+    public BundleJob getBundleJobInfo(String jobId) throws OozieClientException {
+        return getLocalOozieClientBundle().getBundleJobInfo(jobId);
+    }
+
+    @Override
+    public List<BundleJob> getBundleJobsInfo(String filter, int start, int len) throws OozieClientException {
+        return getLocalOozieClientBundle().getBundleJobsInfo(filter, start, len);
+    }
+
+    public String run(Properties conf) throws OozieClientException {
+        return getLocalOozieClientBundle().run(conf);
+    }
+
+    @Override
+    public Void reRunBundle(final String jobId, final String coordScope, final String dateScope,
+                            final boolean refresh, final boolean noCleanup) throws OozieClientException {
+        return getLocalOozieClientBundle().reRunBundle(jobId, coordScope, dateScope, refresh, noCleanup);
+    }
+
+    @Override
+    public String dryrun(Properties conf) {
+        return null;
+    }
+
+    @Override
+    public CoordinatorAction getCoordActionInfo(String actionId) throws OozieClientException {
+        return getLocalOozieClientCoord().getCoordActionInfo(actionId);
+    }
+
+
+    @Override
+    public CoordinatorJob getCoordJobInfo(final String jobId) throws OozieClientException {
+        return getLocalOozieClientCoord().getCoordJobInfo(jobId);
+    }
+
+    @Override
+    public List<CoordinatorJob> getCoordJobsInfo(final String filter, final int start,
+                                                 final int len) throws OozieClientException {
+        return getLocalOozieClientCoord().getCoordJobsInfo(filter, start, len);
+    }
+
+    @Override
+    public CoordinatorJob getCoordJobInfo(final String jobId, final String filter,
+                                          final int start, final int len) throws OozieClientException {
+        return getLocalOozieClientCoord().getCoordJobInfo(jobId, filter, start, len);
+    }
+
+    @Override
+    public List<CoordinatorAction> reRunCoord(final String jobId, final String rerunType,
+                                              final String scope, final boolean refresh,
+                                              final boolean noCleanup) throws OozieClientException {
+        return getLocalOozieClientCoord().reRunCoord(jobId, rerunType, scope, refresh, noCleanup);
+    }
+
+    @Override
+    public List<WorkflowJob> getJobsInfo(final String filter) throws OozieClientException {
+        return getLocalOozieClientCoord().getJobsInfo(filter);
+    }
+
+    @Override
+    public List<WorkflowJob> getJobsInfo(final String filter, final int start,
+                                         final int len) throws OozieClientException {
+        return getLocalOozieClientCoord().getJobsInfo(filter, start, len);
+    }
+
+    @Override
+    public WorkflowJob getJobInfo(final String jobId) throws OozieClientException {
+        return getLocalOozieClient().getJobInfo(jobId);
+    }
+
+
+    @Override
+    public WorkflowAction getWorkflowActionInfo(final String actionId) throws OozieClientException {
+        return getLocalOozieClient().getWorkflowActionInfo(actionId);
+    }
+
+    @Override
+    public WorkflowJob getJobInfo(final String jobId, final int start, final int len) throws OozieClientException {
+        return getLocalOozieClient().getJobInfo(jobId, start, len);
+    }
+
+    @Override
+    public String getJobId(final String externalId) throws OozieClientException {
+        return getLocalOozieClient().getJobId(externalId);
+    }
+
+    @Override
+    public void reRun(String jobId, Properties conf) throws OozieClientException {
+        throw new IllegalStateException("Rerun not supported ");
+    }
+
+    @Override
+    public void suspend(String jobId) throws OozieClientException {
+        throw new IllegalStateException("Suspend not supported ");
+    }
+
+    @Override
+    public void resume(String jobId) throws OozieClientException {
+        throw new IllegalStateException("Resume not supported ");
+    }
+
+    @Override
+    public void kill(String jobId) throws OozieClientException {
+        throw new IllegalStateException("Kill not supported");
+    }
+
+    @Override
+    public void change(final String jobId, final String changeValue) throws OozieClientException {
+        throw new IllegalStateException("Change not supported");
+    }
+
+    @Override
+    public void getJobLog(final String jobId, final String logRetrievalType,
+                          final String logRetrievalScope, final PrintStream ps) throws OozieClientException {
+        throw new IllegalStateException("Job logs not supported");
+    }
+
+    @Override
+    public String getJobLog(final String jobId) throws OozieClientException {
+        throw new IllegalStateException("Job logs not supported");
+    }
+
+}
+

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 31997e8..34a5471 100644
--- a/pom.xml
+++ b/pom.xml
@@ -390,6 +390,7 @@
         <module>archival</module>
         <module>rerun</module>
         <module>prism</module>
+        <module>unit</module>
         <module>webapp</module>
         <module>docs</module>
     </modules>

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/pom.xml
----------------------------------------------------------------------
diff --git a/unit/pom.xml b/unit/pom.xml
new file mode 100644
index 0000000..ae92687
--- /dev/null
+++ b/unit/pom.xml
@@ -0,0 +1,106 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+  -->
+
+<!-- Maven module for Falcon Unit: in-process Falcon for submit/schedule during development and tests. -->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>falcon-main</artifactId>
+        <groupId>org.apache.falcon</groupId>
+        <version>0.7-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>falcon-unit</artifactId>
+
+    <!-- Dependency versions are expected to come from the parent's dependencyManagement;
+         only falcon-prism pins ${project.version} explicitly (it needs the "classes" classifier). -->
+    <dependencies>
+
+        <dependency>
+            <groupId>org.apache.falcon</groupId>
+            <artifactId>falcon-common</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-client</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.oozie</groupId>
+            <artifactId>oozie-core</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.testng</groupId>
+            <artifactId>testng</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.falcon</groupId>
+            <artifactId>falcon-client</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.falcon</groupId>
+            <artifactId>falcon-hadoop-dependencies</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.falcon</groupId>
+            <artifactId>falcon-oozie-el-extension</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.falcon</groupId>
+            <artifactId>falcon-oozie-adaptor</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.falcon</groupId>
+            <artifactId>falcon-prism</artifactId>
+            <classifier>classes</classifier>
+            <version>${project.version}</version>
+        </dependency>
+
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <!-- NOTE(review): source/target hard-coded to 1.7 here; confirm this matches the
+                     compiler level configured in the parent pom. -->
+                <configuration>
+                    <source>1.7</source>
+                    <target>1.7</target>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-jar-plugin</artifactId>
+                <!-- NOTE(review): plugin version pinned locally (2.4); prefer managing it in the
+                     parent's pluginManagement so all modules agree. -->
+                <version>2.4</version>
+                <configuration>
+                    <!-- Keep log4j.xml out of the packaged jar so it cannot override a consumer's logging config. -->
+                    <excludes>
+                        <exclude>**/log4j.xml</exclude>
+                    </excludes>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+
+</project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/main/java/org/apache/falcon/unit/FalconUnit.java
----------------------------------------------------------------------
diff --git a/unit/src/main/java/org/apache/falcon/unit/FalconUnit.java b/unit/src/main/java/org/apache/falcon/unit/FalconUnit.java
new file mode 100644
index 0000000..eebfa2e
--- /dev/null
+++ b/unit/src/main/java/org/apache/falcon/unit/FalconUnit.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.falcon.unit;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.falcon.FalconException;
+import org.apache.falcon.entity.store.ConfigurationStore;
+import org.apache.falcon.hadoop.JailedFileSystem;
+import org.apache.falcon.security.CurrentUser;
+import org.apache.falcon.service.ServiceInitializer;
+import org.apache.falcon.util.RuntimeProperties;
+import org.apache.falcon.util.StartupProperties;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.oozie.local.LocalOozie;
+import org.apache.oozie.service.Services;
+import org.apache.oozie.util.XConfiguration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * FalconUnit runs jobs in local mode and in cluster mode.
+ * <p/>
+ * Falcon Unit is meant for development/debugging purposes only.
+ */
+public final class FalconUnit {
+
+    private static final Logger LOG = LoggerFactory.getLogger(FalconUnit.class);
+    private static final String OOZIE_SITE_XML = "oozie-site.xml";
+    private static final String OOZIE_DEFAULT_XML = "oozie-default.xml";
+    private static final String STORAGE_URL = "jail://global:00";
+    private static final String OOZIE_HOME_DIR = "/tmp/oozie-" + System.getProperty("user.name");
+
+    private static JailedFileSystem jailedFileSystem = new JailedFileSystem();
+    private static final ServiceInitializer STARTUP_SERVICES = new ServiceInitializer();
+    // First-seen values of the system properties overridden in setupOozieConfigs(); restored by cleanup().
+    private static Map<String, String> sysProps;
+    private static FalconUnitClient falconUnitClient;
+    private static boolean isLocalMode;
+    private static boolean isFalconUnitActive = false;
+
+    private FalconUnit() {
+    }
+
+
+    /**
+     * Starts Falcon Unit: loads startup and runtime properties, initializes Falcon services and,
+     * in local mode, sets up a local Oozie configuration plus a jailed file system.
+     *
+     * @param isLocal true for local (embedded) mode, false for cluster mode
+     * @throws IllegalStateException if Falcon Unit has already been started
+     */
+    public static synchronized void start(boolean isLocal) throws FalconException, IOException {
+        if (isFalconUnitActive) {
+            throw new IllegalStateException("Falcon Unit is already initialized");
+        }
+        isLocalMode = isLocal;
+        //Initialize Startup and runtime properties
+        LOG.info("Initializing startup properties ...");
+        StartupProperties.get();
+
+        LOG.info("Initializing runtime properties ...");
+        RuntimeProperties.get();
+
+        //Initializing Services
+        STARTUP_SERVICES.initialize();
+        ConfigurationStore.get();
+
+        if (isLocalMode) {
+            setupOozieConfigs();
+            initFileSystem();
+        }
+        isFalconUnitActive = true;
+
+    }
+
+    // Points the jailed file system at the jail:// storage URL used in local mode.
+    private static void initFileSystem() throws IOException {
+        Configuration conf = new Configuration();
+        conf.set("fs.defaultFS", STORAGE_URL);
+        jailedFileSystem.initialize(LocalFileSystem.getDefaultUri(conf), conf);
+    }
+
+    /**
+     * Creates the local Oozie home/conf/log directory layout under /tmp, sets the Oozie system
+     * properties (remembering their original values), and writes an oozie-site.xml whose service
+     * list excludes ShareLibService (no sharelib is available in local mode).
+     */
+    private static void setupOozieConfigs() throws IOException {
+        sysProps = new HashMap<>();
+        String oozieHomeDir = OOZIE_HOME_DIR;
+        String oozieConfDir = oozieHomeDir + "/conf";
+        String oozieHadoopConfDir = oozieConfDir + "/hadoop-conf";
+        String oozieActionConfDir = oozieConfDir + "/action-conf";
+        String oozieLogsDir = oozieHomeDir + "/logs";
+        String oozieDataDir = oozieHomeDir + "/data";
+
+        LocalFileSystem fs = new LocalFileSystem();
+        fs.mkdirs(new Path(oozieHomeDir));
+        fs.mkdirs(new Path(oozieConfDir));
+        fs.mkdirs(new Path(oozieHadoopConfDir));
+        fs.mkdirs(new Path(oozieActionConfDir));
+        fs.mkdirs(new Path(oozieLogsDir));
+        fs.close();
+
+        setSystemProperty("oozie.home.dir", oozieHomeDir);
+        setSystemProperty("oozie.data.dir", oozieDataDir);
+        setSystemProperty("oozie.action.conf", oozieActionConfDir);
+        setSystemProperty("oozie.log.dir", oozieLogsDir);
+        setSystemProperty("oozie.log4j.file", "localoozie-log4j.properties");
+        // NOTE(review): this sets the property to the literal string "oozieLogsDir/oozielocal.log";
+        // it looks like it was meant to be oozieLogsDir + "/oozielocal.log" — confirm and fix.
+        setSystemProperty("oozielocal.log", "oozieLogsDir/oozielocal.log");
+
+        // Load oozie-site.xml from the classpath into a fresh (defaults-free) Configuration.
+        Configuration oozieSiteConf = new Configuration(false);
+        ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+        InputStream oozieSiteInputStream = classLoader.getResourceAsStream(OOZIE_SITE_XML);
+        XConfiguration configuration = new XConfiguration(oozieSiteInputStream);
+        Properties props = configuration.toProperties();
+        for (String propName : props.stringPropertyNames()) {
+            oozieSiteConf.set(propName, props.getProperty(propName));
+        }
+        oozieSiteInputStream.close();
+
+        // Take the service list from oozie-default.xml, dropping ShareLibService which cannot run locally.
+        InputStream oozieDefaultInputStream = classLoader.getResourceAsStream(OOZIE_DEFAULT_XML);
+        configuration = new XConfiguration(oozieDefaultInputStream);
+        String classes = configuration.get(Services.CONF_SERVICE_CLASSES);
+        oozieSiteConf.set(Services.CONF_SERVICE_CLASSES, classes.replaceAll(
+                "org.apache.oozie.service.ShareLibService,", ""));
+        // Persist the merged configuration as <oozieConfDir>/oozie-site.xml (presumably picked up by
+        // LocalOozie at startup — verify).
+        File target = new File(oozieConfDir, OOZIE_SITE_XML);
+        FileOutputStream outStream = null;
+        try {
+            outStream = new FileOutputStream(target);
+            oozieSiteConf.writeXml(outStream);
+        } finally {
+            if (outStream != null) {
+                outStream.close();
+            }
+        }
+        oozieDefaultInputStream.close();
+
+        CurrentUser.authenticate(System.getProperty("user.name"));
+    }
+
+    /**
+     * Stops services and, in local mode, stops local Oozie, removes its working directory and
+     * restores the overridden system properties. After this, start() may be called again.
+     */
+    public static synchronized void cleanup() throws Exception {
+        STARTUP_SERVICES.destroy();
+        if (isLocalMode) {
+            cleanUpOozie();
+            jailedFileSystem.close();
+        }
+        isFalconUnitActive = false;
+    }
+
+    private static void cleanUpOozie() throws IOException, FalconException {
+        LocalOozie.stop();
+        FileUtils.deleteDirectory(new File(OOZIE_HOME_DIR));
+        resetSystemProperties();
+        // NOTE(review): clears any security manager — presumably one installed during the local Oozie
+        // run; confirm nothing else in the JVM relies on it.
+        System.setSecurityManager(null);
+    }
+
+    /**
+     * Returns a lazily-created singleton FalconUnitClient.
+     *
+     * @throws IllegalStateException if Falcon Unit has not been started
+     */
+    public static synchronized FalconUnitClient getClient() throws FalconException {
+        if (!isFalconUnitActive) {
+            throw new IllegalStateException("Falcon Unit is not initialized");
+        }
+        if (falconUnitClient == null) {
+            falconUnitClient = new FalconUnitClient();
+        }
+        return falconUnitClient;
+    }
+
+    /**
+     * Returns the jailed file system used in local mode.
+     * NOTE(review): initFileSystem() only runs in local mode, so in cluster mode this returns an
+     * uninitialized file system — verify callers never use it in that mode.
+     */
+    public static FileSystem getFileSystem() throws IOException {
+        if (!isFalconUnitActive) {
+            throw new IllegalStateException("Falcon Unit is not initialized");
+        }
+        return jailedFileSystem;
+    }
+
+    // Setting System properties and store their actual values (only the first-seen value is kept,
+    // so repeated overrides of the same property still restore the true original).
+    private static void setSystemProperty(String name, String value) {
+        if (!sysProps.containsKey(name)) {
+            String currentValue = System.getProperty(name);
+            sysProps.put(name, currentValue);
+        }
+        if (value != null) {
+            System.setProperty(name, value);
+        } else {
+            System.getProperties().remove(name);
+        }
+    }
+
+
+    /**
+     * Reset changed system properties to their original values.
+     */
+    private static void resetSystemProperties() {
+        if (sysProps != null) {
+            for (Map.Entry<String, String> entry : sysProps.entrySet()) {
+                if (entry.getValue() != null) {
+                    System.setProperty(entry.getKey(), entry.getValue());
+                } else {
+                    System.getProperties().remove(entry.getKey());
+                }
+            }
+            sysProps.clear();
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/main/java/org/apache/falcon/unit/FalconUnitClient.java
----------------------------------------------------------------------
diff --git a/unit/src/main/java/org/apache/falcon/unit/FalconUnitClient.java b/unit/src/main/java/org/apache/falcon/unit/FalconUnitClient.java
new file mode 100644
index 0000000..e898fc3
--- /dev/null
+++ b/unit/src/main/java/org/apache/falcon/unit/FalconUnitClient.java
@@ -0,0 +1,250 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.falcon.unit;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.falcon.FalconException;
+import org.apache.falcon.LifeCycle;
+import org.apache.falcon.client.AbstractFalconClient;
+import org.apache.falcon.client.FalconCLIException;
+import org.apache.falcon.entity.EntityUtil;
+import org.apache.falcon.entity.parser.EntityParser;
+import org.apache.falcon.entity.parser.EntityParserFactory;
+import org.apache.falcon.entity.store.ConfigurationStore;
+import org.apache.falcon.entity.v0.Entity;
+import org.apache.falcon.entity.v0.EntityType;
+import org.apache.falcon.entity.v0.SchemaHelper;
+import org.apache.falcon.entity.v0.feed.Feed;
+import org.apache.falcon.entity.v0.process.Cluster;
+import org.apache.falcon.entity.v0.process.Process;
+import org.apache.falcon.entity.v0.process.Validity;
+import org.apache.falcon.resource.APIResult;
+import org.apache.falcon.resource.InstancesResult;
+import org.apache.falcon.util.DateUtil;
+import org.apache.falcon.workflow.WorkflowEngineFactory;
+import org.apache.falcon.workflow.engine.AbstractWorkflowEngine;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.TimeZone;
+
+/**
+ * Client for Falcon Unit.
+ */
+public class FalconUnitClient extends AbstractFalconClient {
+
+    private static final Logger LOG = LoggerFactory.getLogger(FalconUnitClient.class);
+
+    protected ConfigurationStore configStore;
+    private AbstractWorkflowEngine workflowEngine;
+
+    public FalconUnitClient() throws FalconException {
+        configStore = ConfigurationStore.get();
+        workflowEngine = WorkflowEngineFactory.getWorkflowEngine();
+    }
+
+    public ConfigurationStore getConfigStore() {
+        return this.configStore;
+    }
+
+
+    /**
+     * Submit a new entity. Entities can be of type feed, process or data end
+     * points. Entity definitions are validated structurally against schema and
+     * subsequently for other rules before they are admitted into the system.
+     *
+     * @param type     entity type
+     * @param filePath path for the definition of entity
+     * @return APIResult with the status of the submission
+     * @throws FalconCLIException wrapping any FalconException raised while parsing/validating/publishing
+     */
+    @Override
+    public APIResult submit(String type, String filePath) throws IOException, FalconCLIException {
+        try {
+            EntityType entityType = EntityType.getEnum(type);
+            InputStream entityStream = FalconUnitHelper.getFileInputStream(filePath);
+            EntityParser entityParser = EntityParserFactory.getParser(entityType);
+            Entity entity = entityParser.parse(entityStream);
+
+            Entity existingEntity = configStore.get(entityType, entity.getName());
+            if (existingEntity != null) {
+                if (EntityUtil.equals(existingEntity, entity)) {
+                    LOG.warn(entity.toShortString() + " already registered with same definition " + entity.getName());
+                    // NOTE(review): "{}" is an SLF4J placeholder and is NOT substituted in plain string
+                    // concatenation — this APIResult message will contain a literal "{}". Confirm/fix.
+                    return new APIResult(APIResult.Status.SUCCEEDED, "{} already registered with same definition"
+                            + entity.getName());
+                }
+                LOG.warn(entity.toShortString() + " already registered with different definition "
+                        + "Can't be submitted again. Try removing before submitting.");
+                // NOTE(review): same literal "{}" issue as above.
+                return new APIResult(APIResult.Status.FAILED, "{} already registered with different definition "
+                        + "Can't be submitted again. Try removing before submitting." + entity.getName());
+            }
+
+            entityParser.validate(entity);
+            configStore.publish(entityType, entity);
+            LOG.info("Submit successful: ({}): {}", entityType.name(), entity.getName());
+            return new APIResult(APIResult.Status.SUCCEEDED, "Submit successful (" + type + ") " + entity.getName());
+        } catch (FalconException e) {
+            throw new FalconCLIException("FAILED", e);
+        }
+    }
+
+    /**
+     * Schedules submitted entity.
+     *
+     * @param entityType entity Type
+     * @param entityName entity name
+     * @param cluster    cluster on which it has to be scheduled
+     * @return APIResult with the status of the schedule operation
+     * @throws FalconCLIException
+     */
+    @Override
+    public APIResult schedule(EntityType entityType, String entityName, String cluster) throws FalconCLIException {
+        return schedule(entityType, entityName, null, 0, cluster);
+    }
+
+
+    /**
+     * Schedules a submitted process entity immediately.
+     *
+     * @param entityType   entity type (must be schedulable, i.e. feed or process)
+     * @param entityName   entity name
+     * @param startTime    start time for process while scheduling; ignored for feeds and when empty
+     * @param numInstances numInstances of process to be scheduled
+     * @param cluster      cluster on which process to be scheduled
+     * @return APIResult with the status of the schedule operation
+     */
+    public APIResult schedule(EntityType entityType, String entityName, String startTime, int numInstances,
+                              String cluster) throws FalconCLIException {
+        try {
+            FalconUnitHelper.checkSchedulableEntity(entityType.toString());
+            Entity entity = EntityUtil.getEntity(entityType, entityName);
+            boolean clusterPresent = checkAndUpdateCluster(entity, entityType, cluster);
+            if (!clusterPresent) {
+                LOG.warn("Cluster is not registered with this entity " + entityName);
+                return new APIResult(APIResult.Status.FAILED, entity + "Cluster is not registered with this entity "
+                        + entityName);
+            }
+            if (StringUtils.isNotEmpty(startTime) && entityType == EntityType.PROCESS) {
+                updateStartAndEndTime((Process) entity, startTime, numInstances, cluster);
+            }
+            workflowEngine.schedule(entity);
+            LOG.info(entityName + " is scheduled successfully");
+            // NOTE(review): message hard-codes "PROCESS" even when a FEED is scheduled — confirm.
+            return new APIResult(APIResult.Status.SUCCEEDED, entity + "(" + "PROCESS" + ") scheduled successfully");
+        } catch (FalconException e) {
+            throw new FalconCLIException("FAILED", e);
+        }
+    }
+
+    /**
+     * Instance status for a given nominalTime. Queries the workflow engine over the one-minute
+     * window starting at the nominal time and returns the first instance's status, or null if
+     * no instance is found.
+     *
+     * @param entityType  entity type
+     * @param entityName  entity name
+     * @param nominalTime nominal time of process (UTC, schema date format)
+     * @return InstancesResult.WorkflowStatus or null when no instance exists in the window
+     */
+    public InstancesResult.WorkflowStatus getInstanceStatus(EntityType entityType, String entityName,
+                                                            String nominalTime) throws Exception {
+        if (entityType == EntityType.CLUSTER) {
+            throw new IllegalArgumentException("Instance management functions don't apply to Cluster entities");
+        }
+        Entity entityObject = EntityUtil.getEntity(entityType, entityName);
+        Date startTime = SchemaHelper.parseDateUTC(nominalTime);
+        Date endTime = DateUtil.getNextMinute(startTime);
+        List<LifeCycle> lifeCycles = FalconUnitHelper.checkAndUpdateLifeCycle(null, entityType.name());
+        InstancesResult instancesResult = workflowEngine.getStatus(entityObject, startTime, endTime, lifeCycles);
+        if (instancesResult.getInstances() != null && instancesResult.getInstances().length > 0
+                && instancesResult.getInstances()[0] != null) {
+            LOG.info("Instance status is " + instancesResult.getInstances()[0].getStatus());
+            return instancesResult.getInstances()[0].getStatus();
+        }
+        return null;
+    }
+
+    // Dispatches to the feed/process cluster-pruning helper; only FEED and PROCESS are supported.
+    private boolean checkAndUpdateCluster(Entity entity, EntityType entityType, String cluster) {
+        if (entityType == EntityType.FEED) {
+            return checkAndUpdateFeedClusters(entity, cluster);
+        } else if (entityType == EntityType.PROCESS) {
+            return checkAndUpdateProcessClusters(entity, cluster);
+        } else {
+            // NOTE(review): "{}" placeholder is not substituted in string concatenation — the message
+            // will contain a literal "{}". Confirm/fix.
+            throw new IllegalArgumentException("entity type {} is not supported " + entityType);
+        }
+    }
+
+    /**
+     * Prunes the process entity's cluster list down to the requested cluster (case-insensitive match).
+     * NOTE(review): mutates the entity instance in place; since the entity comes from the config
+     * store, the stored definition loses its other clusters — confirm intended.
+     *
+     * @return false when the requested cluster is not registered with the entity
+     */
+    private boolean checkAndUpdateProcessClusters(Entity entity, String cluster) {
+        Process processEntity = (Process) entity;
+        List<Cluster> clusters = processEntity.getClusters().getClusters();
+        List<Cluster> newClusters = new ArrayList<>();
+        if (clusters != null) {
+            for (Cluster processCluster : clusters) {
+                if (processCluster.getName().equalsIgnoreCase(cluster)) {
+                    newClusters.add(processCluster);
+                }
+            }
+        }
+        if (newClusters.isEmpty()) {
+            LOG.warn("Cluster is not registered with this entity " + entity.getName());
+            return false;
+        }
+        processEntity.getClusters().getClusters().removeAll(clusters);
+        processEntity.getClusters().getClusters().addAll(newClusters);
+        return true;
+    }
+
+    /**
+     * Feed analogue of {@link #checkAndUpdateProcessClusters}: prunes the feed's cluster list down
+     * to the requested cluster (case-insensitive match), mutating the entity in place.
+     *
+     * @return false when the requested cluster is not registered with the entity
+     */
+    private boolean checkAndUpdateFeedClusters(Entity entity, String cluster) {
+        Feed feedEntity = (Feed) entity;
+        List<org.apache.falcon.entity.v0.feed.Cluster> clusters = feedEntity.getClusters().getClusters();
+        List<org.apache.falcon.entity.v0.feed.Cluster> newClusters = new ArrayList<>();
+        if (clusters != null) {
+            for (org.apache.falcon.entity.v0.feed.Cluster feedClusters : clusters) {
+                if (feedClusters.getName().equalsIgnoreCase(cluster)) {
+                    newClusters.add(feedClusters);
+                }
+            }
+        }
+        if (newClusters.isEmpty()) {
+            LOG.warn("Cluster is not registered with this entity " + entity.getName());
+            return false;
+        }
+        feedEntity.getClusters().getClusters().removeAll(clusters);
+        feedEntity.getClusters().getClusters().addAll(newClusters);
+        return true;
+    }
+
+    // Sets the matching cluster's validity to [startTime, startTime + numInstances * frequency) in UTC.
+    private void updateStartAndEndTime(Process processEntity, String startTimeStr, int numInstances, String cluster) {
+        List<Cluster> clusters = processEntity.getClusters().getClusters();
+        if (clusters != null) {
+            for (Cluster processCluster : clusters) {
+                if (processCluster.getName().equalsIgnoreCase(cluster)) {
+                    Validity validity = new Validity();
+                    Date startTime = SchemaHelper.parseDateUTC(startTimeStr);
+                    validity.setStart(startTime);
+                    Date endTime = EntityUtil.getNextInstanceTime(startTime, processEntity.getFrequency(),
+                            TimeZone.getTimeZone("UTC"), numInstances);
+                    validity.setEnd(endTime);
+                    processCluster.setValidity(validity);
+                }
+            }
+        }
+    }
+}
+

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/main/java/org/apache/falcon/unit/FalconUnitHelper.java
----------------------------------------------------------------------
diff --git a/unit/src/main/java/org/apache/falcon/unit/FalconUnitHelper.java b/unit/src/main/java/org/apache/falcon/unit/FalconUnitHelper.java
new file mode 100644
index 0000000..604a3f9
--- /dev/null
+++ b/unit/src/main/java/org/apache/falcon/unit/FalconUnitHelper.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.falcon.unit;
+
+
+import org.apache.falcon.FalconException;
+import org.apache.falcon.LifeCycle;
+import org.apache.falcon.entity.v0.EntityType;
+import org.apache.falcon.entity.v0.UnschedulableEntityException;
+
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Utility class for Falcon Unit.
+ */
+public final class FalconUnitHelper {
+    private FalconUnitHelper() {
+    }
+
+    /**
+     * Opens an InputStream over the file at the given path.
+     *
+     * @param filePath - Path of file to stream
+     * @return InputStream for the file contents
+     * @throws org.apache.falcon.FalconException if the file does not exist
+     * @throws IllegalArgumentException if filePath is null
+     */
+    public static InputStream getFileInputStream(String filePath) throws FalconException {
+        if (filePath == null) {
+            throw new IllegalArgumentException("file path should not be null");
+        }
+        InputStream stream;
+        try {
+            stream = new FileInputStream(filePath);
+        } catch (FileNotFoundException e) {
+            throw new FalconException("File not found: " + filePath);
+        }
+        return stream;
+    }
+
+    /**
+     * Updates lifecycle based on entity. When no lifecycles are given, defaults to EXECUTION for
+     * processes and REPLICATION for feeds; otherwise validates that every given lifecycle matches
+     * the entity type.
+     *
+     * @param lifeCycleValues lifecycles to validate; may be null or empty
+     * @param type            entity type
+     * @return list of lifecycle values after check and update
+     * @throws FalconException if a given lifecycle does not apply to the entity type
+     */
+    public static List<LifeCycle> checkAndUpdateLifeCycle(List<LifeCycle> lifeCycleValues,
+                                                          String type) throws FalconException {
+        EntityType entityType = EntityType.getEnum(type);
+        if (lifeCycleValues == null || lifeCycleValues.isEmpty()) {
+            List<LifeCycle> lifeCycles = new ArrayList<LifeCycle>();
+            if (entityType == EntityType.PROCESS) {
+                lifeCycles.add(LifeCycle.valueOf(LifeCycle.EXECUTION.name()));
+            } else if (entityType == EntityType.FEED) {
+                lifeCycles.add(LifeCycle.valueOf(LifeCycle.REPLICATION.name()));
+            }
+            return lifeCycles;
+        }
+        for (LifeCycle lifeCycle : lifeCycleValues) {
+            if (entityType != lifeCycle.getTag().getType()) {
+                // NOTE(review): message is missing a space before "for" (renders as "...Xfor given type...").
+                throw new FalconException("Incorrect lifecycle: " + lifeCycle + "for given type: " + type);
+            }
+        }
+        return lifeCycleValues;
+    }
+
+    /**
+     * Checks entity is schedulable or not.
+     *
+     * @param type entity type name
+     * @throws UnschedulableEntityException when the entity type cannot be scheduled (e.g. cluster)
+     */
+    public static void checkSchedulableEntity(String type) throws UnschedulableEntityException {
+        EntityType entityType = EntityType.getEnum(type);
+        if (!entityType.isSchedulable()) {
+            throw new UnschedulableEntityException(
+                    "Entity type (" + type + ") " + " cannot be Scheduled/Suspended/Resumed");
+        }
+    }
+}
+

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/main/java/org/apache/falcon/unit/LocalFalconClientProtocolProvider.java
----------------------------------------------------------------------
diff --git a/unit/src/main/java/org/apache/falcon/unit/LocalFalconClientProtocolProvider.java b/unit/src/main/java/org/apache/falcon/unit/LocalFalconClientProtocolProvider.java
new file mode 100644
index 0000000..060b662
--- /dev/null
+++ b/unit/src/main/java/org/apache/falcon/unit/LocalFalconClientProtocolProvider.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.falcon.unit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.LocalJobRunner;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
+import org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+/**
+ * Local ClientProtocol provider for Hadoop, backed by LocalJobRunner. Registered via the
+ * META-INF/services file so MR jobs whose framework name is "unit" run in-process.
+ */
+public class LocalFalconClientProtocolProvider extends ClientProtocolProvider {
+
+    // Shared LocalJobRunner, lazily created on first matching create() call.
+    private LocalJobRunner localJobRunner = null;
+    private static final String UNIT = "unit";
+
+    /**
+     * Returns the local job runner when the configured framework name is "unit" (also the
+     * default when the property is unset); returns null otherwise so another provider is used.
+     */
+    @Override
+    public ClientProtocol create(Configuration conf) throws IOException {
+        String framework = conf.get(MRConfig.FRAMEWORK_NAME, UNIT);
+        if (!UNIT.equals(framework)) {
+            return null;
+        }
+        return getLocalJobRunner(conf);
+    }
+
+    /** The socket address is irrelevant for a local runner; delegates to {@link #create(Configuration)}. */
+    @Override
+    public ClientProtocol create(InetSocketAddress inetSocketAddress, Configuration conf) throws IOException {
+        return create(conf);
+    }
+
+    /** No-op: the shared LocalJobRunner is kept for reuse rather than released per-protocol. */
+    @Override
+    public void close(ClientProtocol clientProtocol) throws IOException {
+
+    }
+
+    // Lazily creates the single LocalJobRunner; synchronized so concurrent callers share one instance.
+    // Note: the runner is built with the first caller's Configuration and reused for later calls.
+    private synchronized LocalJobRunner getLocalJobRunner(Configuration conf) throws IOException {
+        if (localJobRunner == null) {
+            localJobRunner = new LocalJobRunner(conf);
+        }
+        return localJobRunner;
+    }
+}

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider
----------------------------------------------------------------------
diff --git a/unit/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider b/unit/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider
new file mode 100644
index 0000000..2891352
--- /dev/null
+++ b/unit/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider
@@ -0,0 +1,18 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+ org.apache.falcon.unit.LocalFalconClientProtocolProvider
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/falcon/blob/3f00d051/unit/src/main/resources/core-site.xml
----------------------------------------------------------------------
diff --git a/unit/src/main/resources/core-site.xml b/unit/src/main/resources/core-site.xml
new file mode 100644
index 0000000..fd8550f
--- /dev/null
+++ b/unit/src/main/resources/core-site.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+
+    <!-- Maps the "fsext" URI scheme to Falcon's FileSystemExtension implementation. -->
+    <property>
+        <name>fs.fsext.impl</name>
+        <value>org.apache.falcon.hadoop.FileSystemExtension</value>
+    </property>
+
+    <!-- Default filesystem uses the "jail" scheme (handled by JailedFileSystem
+         below) — presumably a sandboxed test filesystem for Falcon unit runs;
+         confirm against org.apache.falcon.hadoop.JailedFileSystem. -->
+    <property>
+        <name>fs.defaultFS</name>
+        <value>jail://global:00</value>
+    </property>
+
+    <!-- Maps the "jail" URI scheme to Falcon's JailedFileSystem implementation. -->
+    <property>
+        <name>fs.jail.impl</name>
+        <value>org.apache.falcon.hadoop.JailedFileSystem</value>
+    </property>
+
+
+</configuration>